mirror of https://github.com/MariaDB/server.git
merge with 5.1
@@ -1,3 +1,34 @@
2010-08-24 The InnoDB Team

* handler/ha_innodb.c, dict/dict0dict.c:
Fix Bug #55832 selects crash too easily when innodb_force_recovery>3

2010-08-03 The InnoDB Team

* include/dict0dict.h, include/dict0dict.ic, row/row0mysql.c:
Fix bug #54678, InnoDB, TRUNCATE, ALTER, I_S SELECT, crash or deadlock

2010-08-03 The InnoDB Team

* dict/dict0load.c, handler/ha_innodb.cc, include/db0err.h,
include/dict0load.h, include/dict0mem.h, include/que0que.h,
row/row0merge.c, row/row0mysql.c:
Fix Bug#54582 stack overflow when opening many tables linked
with foreign keys at once

2010-08-03 The InnoDB Team

* include/ut0mem.h, ut/ut0mem.c:
Fix Bug #55627 segv in ut_free pars_lexer_close innobase_shutdown
innodb-use-sys-malloc=0

2010-08-01 The InnoDB Team

* handler/ha_innodb.cc
Fix Bug #55382 Assignment with SELECT expressions takes unexpected
S locks in READ COMMITTED
>>>>>>> MERGE-SOURCE

2010-07-27 The InnoDB Team

* include/mem0pool.h, mem/mem0mem.c, mem/mem0pool.c, srv/srv0start.c:
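The Bug#54582 entries above and the dict/dict0load.c hunks below cap recursion while loading tables that are chained together by foreign keys. Purely as a rough, self-contained sketch of that idea (hypothetical names, not the code in this commit), a loader can carry a depth counter and stop descending once a fixed limit is exceeded; the skipped child is loaded later, when a constraint check actually needs it:

#include <stdio.h>

#define MAX_FK_LOAD_DEPTH 250	/* illustrative cap, in the spirit of DICT_FK_MAX_RECURSIVE_LOAD */

struct demo_table {
	const char*		name;
	struct demo_table*	child;	/* next table reached through a FK */
};

static void
demo_load_table(const struct demo_table* t, int depth)
{
	if (t == NULL) {
		return;
	}
	if (depth > MAX_FK_LOAD_DEPTH) {
		/* Stop descending; defer loading this child. */
		printf("skipping %s: FK chain deeper than %d\n",
		       t->name, MAX_FK_LOAD_DEPTH);
		return;
	}
	printf("loading %s (depth %d)\n", t->name, depth);
	demo_load_table(t->child, depth + 1);
}

int
main(void)
{
	struct demo_table child = { "child_table", NULL };
	struct demo_table parent = { "parent_table", &child };

	demo_load_table(&parent, 0);
	return 0;
}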
@@ -3484,9 +3484,10 @@ btr_cur_set_ownership_of_extern_field(
Marks not updated extern fields as not-owned by this record. The ownership
is transferred to the updated record which is inserted elsewhere in the
index tree. In purge only the owner of externally stored field is allowed
to free the field. */
to free the field.
@return TRUE if BLOB ownership was transferred */
UNIV_INTERN
void
ibool
btr_cur_mark_extern_inherited_fields(
/*=================================*/
page_zip_des_t* page_zip,/*!< in/out: compressed page whose uncompressed
@@ -3500,13 +3501,14 @@ btr_cur_mark_extern_inherited_fields(
ulint n;
ulint j;
ulint i;
ibool change_ownership = FALSE;

ut_ad(rec_offs_validate(rec, NULL, offsets));
ut_ad(!rec_offs_comp(offsets) || !rec_get_node_ptr_flag(rec));

if (!rec_offs_any_extern(offsets)) {

return;
return(FALSE);
}

n = rec_offs_n_fields(offsets);
@@ -3529,10 +3531,14 @@ btr_cur_mark_extern_inherited_fields(

btr_cur_set_ownership_of_extern_field(
page_zip, rec, index, offsets, i, FALSE, mtr);

change_ownership = TRUE;
updated:
;
}
}

return(change_ownership);
}

/*******************************************************************//**
@@ -1734,6 +1734,7 @@ function_exit:
}
}

#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
/********************************************************************//**
Validates the search system.
@return TRUE if ok */
@@ -1897,3 +1898,4 @@ btr_search_validate(void)

return(ok);
}
#endif /* defined UNIV_AHI_DEBUG || defined UNIV_DEBUG */
@@ -568,8 +568,7 @@ dict_table_get_on_id(
{
dict_table_t* table;

if (ut_dulint_cmp(table_id, DICT_FIELDS_ID) <= 0
|| trx->dict_operation_lock_mode == RW_X_LATCH) {
if (trx->dict_operation_lock_mode == RW_X_LATCH) {

/* Note: An X latch implies that the transaction
already owns the dictionary mutex. */
@@ -4192,7 +4191,6 @@ dict_update_statistics_low(
dictionary mutex */
{
dict_index_t* index;
ulint size;
ulint sum_of_index_sizes = 0;

if (table->ibd_file_missing) {
@@ -4207,14 +4205,6 @@ dict_update_statistics_low(
return;
}

/* If we have set a high innodb_force_recovery level, do not calculate
statistics, as a badly corrupted index can cause a crash in it. */

if (srv_force_recovery >= SRV_FORCE_NO_IBUF_MERGE) {

return;
}

/* Find out the sizes of the indexes and how many different values
for the key they approximately have */

@@ -4226,26 +4216,48 @@ dict_update_statistics_low(
return;
}

while (index) {
size = btr_get_size(index, BTR_TOTAL_SIZE);

index->stat_index_size = size;
do {
if (UNIV_LIKELY
(srv_force_recovery < SRV_FORCE_NO_IBUF_MERGE
|| (srv_force_recovery < SRV_FORCE_NO_LOG_REDO
&& dict_index_is_clust(index)))) {
ulint size;
size = btr_get_size(index, BTR_TOTAL_SIZE);

sum_of_index_sizes += size;
index->stat_index_size = size;

size = btr_get_size(index, BTR_N_LEAF_PAGES);
sum_of_index_sizes += size;

if (size == 0) {
/* The root node of the tree is a leaf */
size = 1;
size = btr_get_size(index, BTR_N_LEAF_PAGES);

if (size == 0) {
/* The root node of the tree is a leaf */
size = 1;
}

index->stat_n_leaf_pages = size;

btr_estimate_number_of_different_key_vals(index);
} else {
/* If we have set a high innodb_force_recovery
level, do not calculate statistics, as a badly
corrupted index can cause a crash in it.
Initialize some bogus index cardinality
statistics, so that the data can be queried in
various means, also via secondary indexes. */
ulint i;

sum_of_index_sizes++;
index->stat_index_size = index->stat_n_leaf_pages = 1;

for (i = dict_index_get_n_unique(index); i; ) {
index->stat_n_diff_key_vals[i--] = 1;
}
}

index->stat_n_leaf_pages = size;

btr_estimate_number_of_different_key_vals(index);

index = dict_table_get_next_index(index);
}
} while (index);

index = dict_table_get_first_index(table);
@@ -1009,16 +1009,27 @@ err_exit:

err = dict_load_indexes(table, heap);

/* Initialize table foreign_child value. Its value could be
changed when dict_load_foreigns() is called below */
table->fk_max_recusive_level = 0;

/* If the force recovery flag is set, we open the table irrespective
of the error condition, since the user may want to dump data from the
clustered index. However we load the foreign key information only if
all indexes were loaded. */
if (err == DB_SUCCESS) {
err = dict_load_foreigns(table->name, TRUE);
err = dict_load_foreigns(table->name, TRUE, TRUE);

if (err != DB_SUCCESS) {
dict_table_remove_from_cache(table);
table = NULL;
}
} else if (!srv_force_recovery) {
dict_table_remove_from_cache(table);
table = NULL;
}

table->fk_max_recusive_level = 0;
#if 0
if (err != DB_SUCCESS && table != NULL) {

@@ -1241,8 +1252,12 @@ dict_load_foreign(
/*==============*/
const char* id, /*!< in: foreign constraint id as a
null-terminated string */
ibool check_charsets)
ibool check_charsets,
/*!< in: TRUE=check charset compatibility */
ibool check_recursive)
/*!< in: Whether to record the foreign table
parent count to avoid unlimited recursive
load of chained foreign tables */
{
dict_foreign_t* foreign;
dict_table_t* sys_foreign;
@@ -1256,6 +1271,8 @@ dict_load_foreign(
ulint len;
ulint n_fields_and_type;
mtr_t mtr;
dict_table_t* for_table;
dict_table_t* ref_table;

ut_ad(mutex_own(&(dict_sys->mutex)));
@@ -1340,11 +1357,54 @@ dict_load_foreign(

dict_load_foreign_cols(id, foreign);

/* If the foreign table is not yet in the dictionary cache, we
have to load it so that we are able to make type comparisons
in the next function call. */
ref_table = dict_table_check_if_in_cache_low(
foreign->referenced_table_name);

dict_table_get_low(foreign->foreign_table_name);
/* We could possibly wind up in a deep recursive calls if
we call dict_table_get_low() again here if there
is a chain of tables concatenated together with
foreign constraints. In such case, each table is
both a parent and child of the other tables, and
act as a "link" in such table chains.
To avoid such scenario, we would need to check the
number of ancesters the current table has. If that
exceeds DICT_FK_MAX_CHAIN_LEN, we will stop loading
the child table.
Foreign constraints are loaded in a Breath First fashion,
that is, the index on FOR_NAME is scanned first, and then
index on REF_NAME. So foreign constrains in which
current table is a child (foreign table) are loaded first,
and then those constraints where current table is a
parent (referenced) table.
Thus we could check the parent (ref_table) table's
reference count (fk_max_recusive_level) to know how deep the
recursive call is. If the parent table (ref_table) is already
loaded, and its fk_max_recusive_level is larger than
DICT_FK_MAX_CHAIN_LEN, we will stop the recursive loading
by skipping loading the child table. It will not affect foreign
constraint check for DMLs since child table will be loaded
at that time for the constraint check. */
if (!ref_table
|| ref_table->fk_max_recusive_level < DICT_FK_MAX_RECURSIVE_LOAD) {

/* If the foreign table is not yet in the dictionary cache, we
have to load it so that we are able to make type comparisons
in the next function call. */

for_table = dict_table_get_low(foreign->foreign_table_name);

if (for_table && ref_table && check_recursive) {
/* This is to record the longest chain of ancesters
this table has, if the parent has more ancesters
than this table has, record it after add 1 (for this
parent */
if (ref_table->fk_max_recusive_level
>= for_table->fk_max_recusive_level) {
for_table->fk_max_recusive_level =
ref_table->fk_max_recusive_level + 1;
}
}
}

/* Note that there may already be a foreign constraint object in
the dictionary cache for this constraint: then the following
@@ -1369,6 +1429,8 @@ ulint
dict_load_foreigns(
/*===============*/
const char* table_name, /*!< in: table name */
ibool check_recursive,/*!< in: Whether to check recursive
load of tables chained by FK */
ibool check_charsets) /*!< in: TRUE=check charset
compatibility */
{
@@ -1470,7 +1532,7 @@ loop:

/* Load the foreign constraint definition to the dictionary cache */

err = dict_load_foreign(id, check_charsets);
err = dict_load_foreign(id, check_charsets, check_recursive);

if (err != DB_SUCCESS) {
btr_pcur_close(&pcur);
@@ -1498,6 +1560,11 @@ load_next_index:

mtr_start(&mtr);

/* Switch to scan index on REF_NAME, fk_max_recusive_level
already been updated when scanning FOR_NAME index, no need to
update again */
check_recursive = FALSE;

goto start_load;
}
@@ -354,6 +354,7 @@ ha_remove_all_nodes_to_page(
#endif
}

#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
/*************************************************************//**
Validates a given range of the cells in hash table.
@return TRUE if ok */
@@ -400,6 +401,7 @@ ha_validate(

return(ok);
}
#endif /* defined UNIV_AHI_DEBUG || defined UNIV_DEBUG */

/*************************************************************//**
Prints info of a hash table. */
@@ -767,6 +767,19 @@ convert_error_code_to_mysql(
case DB_INTERRUPTED:
my_error(ER_QUERY_INTERRUPTED, MYF(0));
/* fall through */

case DB_FOREIGN_EXCEED_MAX_CASCADE:
push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
HA_ERR_ROW_IS_REFERENCED,
"InnoDB: Cannot delete/update "
"rows with cascading foreign key "
"constraints that exceed max "
"depth of %d. Please "
"drop extra constraints and try "
"again", DICT_FK_MAX_RECURSIVE_LOAD);

/* fall through */

case DB_ERROR:
default:
return(-1); /* unspecified error */
@@ -3348,12 +3361,19 @@ ha_innobase::innobase_initialize_autoinc()
err = row_search_max_autoinc(index, col_name, &read_auto_inc);

switch (err) {
case DB_SUCCESS:
/* At the this stage we do not know the increment
or the offset, so use a default increment of 1. */
auto_inc = read_auto_inc + 1;
break;
case DB_SUCCESS: {
ulonglong col_max_value;

col_max_value = innobase_get_int_col_max_value(field);

/* At the this stage we do not know the increment
nor the offset, so use a default increment of 1. */

auto_inc = innobase_next_autoinc(
read_auto_inc, 1, 1, col_max_value);

break;
}
case DB_RECORD_NOT_FOUND:
ut_print_timestamp(stderr);
fprintf(stderr, " InnoDB: MySQL and InnoDB data "
@@ -3648,8 +3668,6 @@ retry:
dict_table_get_format(prebuilt->table));
}

info(HA_STATUS_NO_LOCK | HA_STATUS_VARIABLE | HA_STATUS_CONST);

/* Only if the table has an AUTOINC column. */
if (prebuilt->table != NULL && table->found_next_number_field != NULL) {
dict_table_autoinc_lock(prebuilt->table);
@@ -3666,6 +3684,8 @@ retry:
dict_table_autoinc_unlock(prebuilt->table);
}

info(HA_STATUS_NO_LOCK | HA_STATUS_VARIABLE | HA_STATUS_CONST);

DBUG_RETURN(0);
}

@@ -7512,28 +7532,15 @@ ha_innobase::info(
dict_index_t* index;
ha_rows rec_per_key;
ib_int64_t n_rows;
ulong j;
ulong i;
char path[FN_REFLEN];
os_file_stat_t stat_info;


DBUG_ENTER("info");

/* If we are forcing recovery at a high level, we will suppress
statistics calculation on tables, because that may crash the
server if an index is badly corrupted. */

if (srv_force_recovery >= SRV_FORCE_NO_IBUF_MERGE) {

/* We return success (0) instead of HA_ERR_CRASHED,
because we want MySQL to process this query and not
stop, like it would do if it received the error code
HA_ERR_CRASHED. */

DBUG_RETURN(0);
}

/* We do not know if MySQL can call this function before calling
external_lock(). To be safe, update the thd of the current table
handle. */
@@ -7628,12 +7635,18 @@ ha_innobase::info(
acquiring latches inside InnoDB, we do not call it if we
are asked by MySQL to avoid locking. Another reason to
avoid the call is that it uses quite a lot of CPU.
See Bug#38185.
We do not update delete_length if no locking is requested
so the "old" value can remain. delete_length is initialized
to 0 in the ha_statistics' constructor. */
if (!(flag & HA_STATUS_NO_LOCK)) {

See Bug#38185. */
if (flag & HA_STATUS_NO_LOCK) {
/* We do not update delete_length if no
locking is requested so the "old" value can
remain. delete_length is initialized to 0 in
the ha_statistics' constructor. */
} else if (UNIV_UNLIKELY
(srv_force_recovery >= SRV_FORCE_NO_IBUF_MERGE)) {
/* Avoid accessing the tablespace if
innodb_crash_recovery is set to a high value. */
stats.delete_length = 0;
} else {
/* lock the data dictionary to avoid races with
ibd_file_missing and tablespace_discarded */
row_mysql_lock_data_dictionary(prebuilt->trx);
@@ -7678,6 +7691,7 @@ ha_innobase::info(
}

if (flag & HA_STATUS_CONST) {
ulong i;
/* Verify the number of index in InnoDB and MySQL
matches up. If prebuilt->clust_index_was_generated
holds, InnoDB defines GEN_CLUST_INDEX internally */
@@ -7694,6 +7708,7 @@ ha_innobase::info(
}

for (i = 0; i < table->s->keys; i++) {
ulong j;
/* We could get index quickly through internal
index mapping with the index translation table.
The identity of index (match up index name with
@@ -7759,6 +7774,11 @@ ha_innobase::info(
}
}

if (srv_force_recovery >= SRV_FORCE_NO_IBUF_MERGE) {

goto func_exit;
}

if (flag & HA_STATUS_ERRKEY) {
const dict_index_t* err_index;

@@ -7779,6 +7799,7 @@ ha_innobase::info(
stats.auto_increment_value = innobase_peek_autoinc();
}

func_exit:
prebuilt->trx->op_info = (char*)"";

DBUG_RETURN(0);
@@ -9236,7 +9257,8 @@ ha_innobase::store_lock(
&& (sql_command == SQLCOM_INSERT_SELECT
|| sql_command == SQLCOM_REPLACE_SELECT
|| sql_command == SQLCOM_UPDATE
|| sql_command == SQLCOM_CREATE_TABLE)) {
|| sql_command == SQLCOM_CREATE_TABLE
|| sql_command == SQLCOM_SET_OPTION)) {

/* If we either have innobase_locks_unsafe_for_binlog
option set or this session is using READ COMMITTED
@@ -9244,9 +9266,9 @@ ha_innobase::store_lock(
is not set to serializable and MySQL is doing
INSERT INTO...SELECT or REPLACE INTO...SELECT
or UPDATE ... = (SELECT ...) or CREATE ...
SELECT... without FOR UPDATE or IN SHARE
MODE in select, then we use consistent read
for select. */
SELECT... or SET ... = (SELECT ...) without
FOR UPDATE or IN SHARE MODE in select,
then we use consistent read for select. */

prebuilt->select_lock_type = LOCK_NONE;
prebuilt->stored_select_lock_type = LOCK_NONE;
@@ -468,9 +468,10 @@ btr_estimate_number_of_different_key_vals(
Marks not updated extern fields as not-owned by this record. The ownership
is transferred to the updated record which is inserted elsewhere in the
index tree. In purge only the owner of externally stored field is allowed
to free the field. */
to free the field.
@return TRUE if BLOB ownership was transferred */
UNIV_INTERN
void
ibool
btr_cur_mark_extern_inherited_fields(
/*=================================*/
page_zip_des_t* page_zip,/*!< in/out: compressed page whose uncompressed
@@ -180,6 +180,7 @@ btr_search_update_hash_on_delete(
btr_cur_t* cursor);/*!< in: cursor which was positioned on the
record to delete using btr_cur_search_...,
the record is not yet deleted */
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
/********************************************************************//**
Validates the search system.
@return TRUE if ok */
@@ -187,6 +188,9 @@ UNIV_INTERN
ibool
btr_search_validate(void);
/*======================*/
#else
# define btr_search_validate() TRUE
#endif /* defined UNIV_AHI_DEBUG || defined UNIV_DEBUG */

/** Flag: has the search system been enabled?
Protected by btr_search_latch and btr_search_enabled_mutex. */
@@ -94,6 +94,9 @@ enum db_err {

DB_PRIMARY_KEY_IS_NULL, /* a column in the PRIMARY KEY
was found to be NULL */
DB_FOREIGN_EXCEED_MAX_CASCADE, /* Foreign key constraint related
cascading delete/update exceeds
maximum allowed depth */

/* The following are partial failure codes */
DB_FAIL = 1000,
@@ -680,6 +680,22 @@ ulint
dict_table_zip_size(
/*================*/
const dict_table_t* table); /*!< in: table */
/*********************************************************************//**
Obtain exclusive locks on all index trees of the table. This is to prevent
accessing index trees while InnoDB is updating internal metadata for
operations such as truncate tables. */
UNIV_INLINE
void
dict_table_x_lock_indexes(
/*======================*/
dict_table_t* table); /*!< in: table */
/*********************************************************************//**
Release the exclusive locks on all index tree. */
UNIV_INLINE
void
dict_table_x_unlock_indexes(
/*========================*/
dict_table_t* table); /*!< in: table */
/********************************************************************//**
Checks if a column is in the ordering columns of the clustered index of a
table. Column prefixes are treated like whole columns.
@@ -452,6 +452,48 @@ dict_table_zip_size(
return(dict_table_flags_to_zip_size(table->flags));
}

/*********************************************************************//**
Obtain exclusive locks on all index trees of the table. This is to prevent
accessing index trees while InnoDB is updating internal metadata for
operations such as truncate tables. */
UNIV_INLINE
void
dict_table_x_lock_indexes(
/*======================*/
dict_table_t* table) /*!< in: table */
{
dict_index_t* index;

ut_a(table);
ut_ad(mutex_own(&(dict_sys->mutex)));

/* Loop through each index of the table and lock them */
for (index = dict_table_get_first_index(table);
index != NULL;
index = dict_table_get_next_index(index)) {
rw_lock_x_lock(dict_index_get_lock(index));
}
}

/*********************************************************************//**
Release the exclusive locks on all index tree. */
UNIV_INLINE
void
dict_table_x_unlock_indexes(
/*========================*/
dict_table_t* table) /*!< in: table */
{
dict_index_t* index;

ut_a(table);
ut_ad(mutex_own(&(dict_sys->mutex)));

for (index = dict_table_get_first_index(table);
index != NULL;
index = dict_table_get_next_index(index)) {
rw_lock_x_unlock(dict_index_get_lock(index));
}
}
/********************************************************************//**
Gets the number of fields in the internal representation of an index,
including fields added by the dictionary system.
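The dict_table_x_lock_indexes()/dict_table_x_unlock_indexes() pair added above is meant to bracket table-level metadata changes, and the TRUNCATE TABLE hunk in row/row0mysql.c further down uses it exactly that way. Purely as an illustration of that bracketing pattern (a standalone analogue with made-up names, not InnoDB code, and ignoring the dict_sys->mutex requirement visible in the asserts), taking every per-index latch in exclusive mode before the change and releasing it afterwards looks like this:

#include <pthread.h>

#define N_INDEXES 4

/* One exclusive latch per "index tree" of the table. */
static pthread_rwlock_t index_latch[N_INDEXES] = {
	PTHREAD_RWLOCK_INITIALIZER, PTHREAD_RWLOCK_INITIALIZER,
	PTHREAD_RWLOCK_INITIALIZER, PTHREAD_RWLOCK_INITIALIZER
};

static void
x_lock_all_indexes(void)
{
	int	i;

	for (i = 0; i < N_INDEXES; i++) {
		pthread_rwlock_wrlock(&index_latch[i]);
	}
}

static void
x_unlock_all_indexes(void)
{
	int	i;

	for (i = 0; i < N_INDEXES; i++) {
		pthread_rwlock_unlock(&index_latch[i]);
	}
}

int
main(void)
{
	x_lock_all_indexes();
	/* ... truncate the index trees / rewrite table metadata here ... */
	x_unlock_all_indexes();
	return 0;
}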
@@ -97,6 +97,8 @@ ulint
dict_load_foreigns(
/*===============*/
const char* table_name, /*!< in: table name */
ibool check_recursive,/*!< in: Whether to check recursive
load of tables chained by FK */
ibool check_charsets);/*!< in: TRUE=check charsets
compatibility */
/********************************************************************//**
@@ -112,6 +112,21 @@ ROW_FORMAT=REDUNDANT. */
in table->flags. */
/* @} */

/** Tables could be chained together with Foreign key constraint. When
first load the parent table, we would load all of its descedents.
This could result in rescursive calls and out of stack error eventually.
DICT_FK_MAX_RECURSIVE_LOAD defines the maximum number of recursive loads,
when exceeded, the child table will not be loaded. It will be loaded when
the foreign constraint check needs to be run. */
#define DICT_FK_MAX_RECURSIVE_LOAD 250

/** Similarly, when tables are chained together with foreign key constraints
with on cascading delete/update clause, delete from parent table could
result in recursive cascading calls. This defines the maximum number of
such cascading deletes/updates allowed. When exceeded, the delete from
parent table will fail, and user has to drop excessive foreign constraint
before proceeds. */
#define FK_MAX_CASCADE_DEL 300

/**********************************************************************//**
Creates a table memory object.
@@ -434,6 +449,12 @@ struct dict_table_struct{
NOT allowed until this count gets to zero;
MySQL does NOT itself check the number of
open handles at drop */
unsigned fk_max_recusive_level:8;
/*!< maximum recursive level we support when
loading tables chained together with FK
constraints. If exceeds this level, we will
stop loading child table into memory along with
its parent table */
ulint n_foreign_key_checks_running;
/*!< count of how many foreign key check
operations are currently being performed
@@ -186,6 +186,7 @@ ha_remove_all_nodes_to_page(
hash_table_t* table, /*!< in: hash table */
ulint fold, /*!< in: fold value */
const page_t* page); /*!< in: buffer page */
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
/*************************************************************//**
Validates a given range of the cells in hash table.
@return TRUE if ok */
@@ -196,6 +197,7 @@ ha_validate(
hash_table_t* table, /*!< in: hash table */
ulint start_index, /*!< in: start index */
ulint end_index); /*!< in: end index */
#endif /* defined UNIV_AHI_DEBUG || defined UNIV_DEBUG */
/*************************************************************//**
Prints info of a hash table. */
UNIV_INTERN
@@ -381,6 +381,9 @@ struct que_thr_struct{
thus far */
ulint lock_state; /*!< lock state of thread (table or
row) */
ulint fk_cascade_depth; /*!< maximum cascading call depth
supported for foreign key constraint
related delete/updates */
};

#define QUE_THR_MAGIC_N 8476583
@@ -46,7 +46,7 @@ Created 1/20/1994 Heikki Tuuri

#define INNODB_VERSION_MAJOR 1
#define INNODB_VERSION_MINOR 0
#define INNODB_VERSION_BUGFIX 11
#define INNODB_VERSION_BUGFIX 12

/* The following is the InnoDB version as shown in
SELECT plugin_version FROM information_schema.plugins;
@@ -113,7 +113,8 @@ ut_test_malloc(
ulint n); /*!< in: try to allocate this many bytes */
#endif /* !UNIV_HOTBACKUP */
/**********************************************************************//**
Frees a memory block allocated with ut_malloc. */
Frees a memory block allocated with ut_malloc. Freeing a NULL pointer is
a nop. */
UNIV_INTERN
void
ut_free(
@@ -2395,7 +2395,7 @@ row_merge_rename_tables(
goto err_exit;
}

err = dict_load_foreigns(old_name, TRUE);
err = dict_load_foreigns(old_name, FALSE, TRUE);

if (err != DB_SUCCESS) {
err_exit:
@@ -576,6 +576,13 @@ handle_new_error:
"InnoDB: " REFMAN "forcing-recovery.html"
" for help.\n", stderr);
break;
case DB_FOREIGN_EXCEED_MAX_CASCADE:
fprintf(stderr, "InnoDB: Cannot delete/update rows with"
" cascading foreign key constraints that exceed max"
" depth of %lu\n"
"Please drop excessive foreign constraints"
" and try again\n", (ulong) DICT_FK_MAX_RECURSIVE_LOAD);
break;
default:
fprintf(stderr, "InnoDB: unknown error code %lu\n",
(ulong) err);
@@ -1381,11 +1388,15 @@ row_update_for_mysql(
run_again:
thr->run_node = node;
thr->prev_node = node;
thr->fk_cascade_depth = 0;

row_upd_step(thr);

err = trx->error_state;

/* Reset fk_cascade_depth back to 0 */
thr->fk_cascade_depth = 0;

if (err != DB_SUCCESS) {
que_thr_stop_for_mysql(thr);

@@ -1576,6 +1587,12 @@ row_update_cascade_for_mysql(
trx_t* trx;

trx = thr_get_trx(thr);

thr->fk_cascade_depth++;

if (thr->fk_cascade_depth > FK_MAX_CASCADE_DEL) {
return (DB_FOREIGN_EXCEED_MAX_CASCADE);
}
run_again:
thr->run_node = node;
thr->prev_node = node;
@@ -2056,7 +2073,7 @@ row_table_add_foreign_constraints(
name, reject_fks);
if (err == DB_SUCCESS) {
/* Check that also referencing constraints are ok */
err = dict_load_foreigns(name, TRUE);
err = dict_load_foreigns(name, FALSE, TRUE);
}

if (err != DB_SUCCESS) {
@@ -2749,6 +2766,15 @@ row_truncate_table_for_mysql(

trx->table_id = table->id;

/* Lock all index trees for this table, as we will
truncate the table/index and possibly change their metadata.
All DML/DDL are blocked by table level lock, with
a few exceptions such as queries into information schema
about the table, MySQL could try to access index stats
for this kind of query, we need to use index locks to
sync up */
dict_table_x_lock_indexes(table);

if (table->space && !table->dir_path_of_temp_table) {
/* Discard and create the single-table tablespace. */
ulint space = table->space;
@@ -2765,6 +2791,7 @@ row_truncate_table_for_mysql(
|| fil_create_new_single_table_tablespace(
space, table->name, FALSE, flags,
FIL_IBD_FILE_INITIAL_SIZE) != DB_SUCCESS) {
dict_table_x_unlock_indexes(table);
ut_print_timestamp(stderr);
fprintf(stderr,
" InnoDB: TRUNCATE TABLE %s failed to"
@@ -2868,6 +2895,10 @@ next_rec:

mem_heap_free(heap);

/* Done with index truncation, release index tree locks,
subsequent work relates to table level metadata change */
dict_table_x_unlock_indexes(table);

dict_hdr_get_new_id(&new_id, NULL, NULL);

info = pars_info_create();
@@ -3915,7 +3946,7 @@ end:
an ALTER, not in a RENAME. */

err = dict_load_foreigns(
new_name, !old_is_tmp || trx->check_foreigns);
new_name, FALSE, !old_is_tmp || trx->check_foreigns);

if (err != DB_SUCCESS) {
ut_print_timestamp(stderr);
@@ -2690,7 +2690,6 @@ row_sel_store_mysql_rec(
ut_ad(prebuilt->mysql_template);
ut_ad(prebuilt->default_rec);
ut_ad(rec_offs_validate(rec, NULL, offsets));
ut_ad(!rec_get_deleted_flag(rec, rec_offs_comp(offsets)));

if (UNIV_LIKELY_NULL(prebuilt->blob_heap)) {
mem_heap_free(prebuilt->blob_heap);
@@ -3611,6 +3610,7 @@ row_search_for_mysql(
row_sel_try_search_shortcut_for_mysql().
The latch will not be released until
mtr_commit(&mtr). */
ut_ad(!rec_get_deleted_flag(rec, comp));

if (!row_sel_store_mysql_rec(buf, prebuilt,
rec, offsets)) {
@@ -4238,7 +4238,7 @@ no_gap_lock:

rec = old_vers;
}
} else if (!lock_sec_rec_cons_read_sees(rec, trx->read_view)) {
} else {
/* We are looking into a non-clustered index,
and to get the right version of the record we
have to look also into the clustered index: this
@@ -4246,8 +4246,12 @@ no_gap_lock:
information via the clustered index record. */

ut_ad(index != clust_index);
ut_ad(!dict_index_is_clust(index));

goto requires_clust_rec;
if (!lock_sec_rec_cons_read_sees(
rec, trx->read_view)) {
goto requires_clust_rec;
}
}
}

@@ -4370,8 +4374,13 @@ requires_clust_rec:
ULINT_UNDEFINED, &heap);
result_rec = rec;
}

/* result_rec can legitimately be delete-marked
now that it has been established that it points to a
clustered index record that exists in the read view. */
} else {
result_rec = rec;
ut_ad(!rec_get_deleted_flag(rec, comp));
}

/* We found a qualifying record 'result_rec'. At this point,
@@ -1598,6 +1598,7 @@ row_upd_clust_rec_by_insert(
dict_table_t* table;
dtuple_t* entry;
ulint err;
ibool change_ownership = FALSE;

ut_ad(node);
ut_ad(dict_index_is_clust(index));
@@ -1630,9 +1631,9 @@ row_upd_clust_rec_by_insert(
index = dict_table_get_first_index(table);
offsets = rec_get_offsets(rec, index, offsets_,
ULINT_UNDEFINED, &heap);
btr_cur_mark_extern_inherited_fields(
btr_cur_get_page_zip(btr_cur),
rec, index, offsets, node->update, mtr);
change_ownership = btr_cur_mark_extern_inherited_fields(
btr_cur_get_page_zip(btr_cur), rec, index, offsets,
node->update, mtr);
if (check_ref) {
/* NOTE that the following call loses
the position of pcur ! */
@@ -1661,10 +1662,11 @@ row_upd_clust_rec_by_insert(

row_upd_index_entry_sys_field(entry, index, DATA_TRX_ID, trx->id);

if (node->upd_ext) {
if (change_ownership) {
/* If we return from a lock wait, for example, we may have
extern fields marked as not-owned in entry (marked in the
if-branch above). We must unmark them. */
if-branch above). We must unmark them, take the ownership
back. */

btr_cur_unmark_dtuple_extern_fields(entry);
@@ -1938,7 +1938,8 @@ trx_undo_update_cleanup(

UT_LIST_ADD_FIRST(undo_list, rseg->update_undo_cached, undo);
} else {
ut_ad(undo->state == TRX_UNDO_TO_PURGE);
ut_ad(undo->state == TRX_UNDO_TO_PURGE
|| undo->state == TRX_UNDO_TO_FREE);

trx_undo_mem_free(undo);
}
@@ -290,7 +290,8 @@ ut_test_malloc(
#endif /* !UNIV_HOTBACKUP */

/**********************************************************************//**
Frees a memory block allocated with ut_malloc. */
Frees a memory block allocated with ut_malloc. Freeing a NULL pointer is
a nop. */
UNIV_INTERN
void
ut_free(
@@ -300,7 +301,9 @@ ut_free(
#ifndef UNIV_HOTBACKUP
ut_mem_block_t* block;

if (UNIV_LIKELY(srv_use_sys_malloc)) {
if (ptr == NULL) {
return;
} else if (UNIV_LIKELY(srv_use_sys_malloc)) {
free(ptr);
return;
}
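The ut_free() hunk above makes freeing a NULL pointer a no-op; the ChangeLog ties this ut0mem.c change to the Bug #55627 shutdown crash with innodb-use-sys-malloc=0. As a tiny standalone illustration of the same guard, using a hypothetical wrapper rather than the real ut_free():

#include <stdlib.h>

/* free() already accepts NULL, but a wrapper that keeps its own
bookkeeping (as ut_free() does when the system allocator is not used)
must check for NULL before touching that bookkeeping. */
static void
demo_free(void* ptr)
{
	if (ptr == NULL) {
		return;		/* freeing NULL is a no-op */
	}
	/* ... remove ptr from the allocation list here ... */
	free(ptr);
}

int
main(void)
{
	char*	p = malloc(16);

	demo_free(p);
	demo_free(NULL);	/* safe after the fix */
	return 0;
}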