Mirror of https://github.com/MariaDB/server.git (synced 2025-07-29 05:21:33 +03:00)
Many files:
  Merge InnoDB-4.0.7. Support for ON UPDATE CASCADE.
sql/sql_select.cc:
  Remove superfluous prints to the .err log when a locking SELECT fails with a deadlock or a lock wait timeout.
innobase/btr/btr0sea.c, innobase/dict/dict0dict.c, innobase/fsp/fsp0fsp.c,
innobase/ibuf/ibuf0ibuf.c, innobase/include/buf0buf.h, innobase/include/db0err.h,
innobase/include/dict0mem.h, innobase/include/mem0mem.h, innobase/include/row0mysql.h,
innobase/include/row0upd.h, innobase/include/mem0mem.ic, innobase/mem/mem0pool.c,
innobase/row/row0ins.c, innobase/row/row0mysql.c, innobase/row/row0sel.c,
innobase/row/row0upd.c, innobase/srv/srv0start.c, innobase/ut/ut0ut.c:
  Merge InnoDB-4.0.7. Support for ON UPDATE CASCADE.
@@ -19,6 +19,9 @@ Created 2/17/1996 Heikki Tuuri
#include "btr0btr.h"
#include "ha0ha.h"

ulint btr_search_this_is_zero = 0; /* A dummy variable to fool the
				compiler */

ulint btr_search_n_succ = 0;
ulint btr_search_n_hash_fail = 0;

@@ -56,16 +59,20 @@ before hash index building is started */

/************************************************************************
Builds a hash index on a page with the given parameters. If the page already
has a hash index with different parameters, the old hash index is removed. */
has a hash index with different parameters, the old hash index is removed.
If index is non-NULL, this function checks if n_fields and n_bytes are
sensible values, and does not build a hash index if not. */
static
void
btr_search_build_page_hash_index(
/*=============================*/
	page_t*	page,	/* in: index page, s- or x-latched */
	ulint	n_fields, /* in: hash this many full fields */
	ulint	n_bytes, /* in: hash this many bytes from the next
	dict_index_t*	index,	/* in: index for which to build, or NULL if
				not known */
	page_t*	page,	/* in: index page, s- or x-latched */
	ulint	n_fields,/* in: hash this many full fields */
	ulint	n_bytes,/* in: hash this many bytes from the next
			field */
	ulint	side);	/* in: hash for searches from this side */
	ulint	side);	/* in: hash for searches from this side */

/*********************************************************************
This function should be called before reserving any btr search mutex, if
@ -173,7 +180,9 @@ btr_search_info_create(
|
||||
}
|
||||
|
||||
/*************************************************************************
|
||||
Updates the search info of an index about hash successes. */
|
||||
Updates the search info of an index about hash successes. NOTE that info
|
||||
is NOT protected by any semaphore, to save CPU time! Do not assume its fields
|
||||
are consistent. */
|
||||
static
|
||||
void
|
||||
btr_search_info_update_hash(
|
||||
@ -295,7 +304,9 @@ set_new_recomm:
|
||||
}
|
||||
|
||||
/*************************************************************************
|
||||
Updates the block search info on hash successes. */
|
||||
Updates the block search info on hash successes. NOTE that info and
|
||||
block->n_hash_helps, n_fields, n_bytes, side are NOT protected by any
|
||||
semaphore, to save CPU time! Do not assume the fields are consistent. */
|
||||
static
|
||||
ibool
|
||||
btr_search_update_block_hash_info(
|
||||
@ -425,12 +436,19 @@ btr_search_info_update_slow(
|
||||
{
|
||||
buf_block_t* block;
|
||||
ibool build_index;
|
||||
ulint* params;
|
||||
ulint* params2;
|
||||
|
||||
ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_SHARED)
|
||||
&& !rw_lock_own(&btr_search_latch, RW_LOCK_EX));
|
||||
|
||||
block = buf_block_align(btr_cur_get_rec(cursor));
|
||||
|
||||
/* NOTE that the following two function calls do NOT protect
|
||||
info or block->n_fields etc. with any semaphore, to save CPU time!
|
||||
We cannot assume the fields are consistent when we return from
|
||||
those functions! */
|
||||
|
||||
btr_search_info_update_hash(info, cursor);
|
||||
|
||||
build_index = btr_search_update_block_hash_info(info, block, cursor);
|
||||
@@ -453,10 +471,30 @@ btr_search_info_update_slow(
	}

	if (build_index) {
		btr_search_build_page_hash_index(block->frame,
						block->n_fields,
						block->n_bytes,
						block->side);
		/* Note that since we did not protect block->n_fields etc.
		with any semaphore, the values can be inconsistent. We have
		to check inside the function call that they make sense. We
		also malloc an array and store the values there to make sure
		the compiler does not let the function call parameters change
		inside the called function. It might be that the compiler
		would optimize the call just to pass pointers to block. */

		params = mem_alloc(3 * sizeof(ulint));
		params[0] = block->n_fields;
		params[1] = block->n_bytes;
		params[2] = block->side;

		/* Make sure the compiler cannot deduce the values and do
		optimizations */

		params2 = params + btr_search_this_is_zero;

		btr_search_build_page_hash_index(cursor->index,
						block->frame,
						params2[0],
						params2[1],
						params2[2]);
		mem_free(params);
	}
}

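The malloc-and-copy step above deserves a note: block->n_fields, block->n_bytes and block->side are read without holding any latch, so the caller first snapshots them into memory it owns, then adds a global zero to the array pointer so the compiler cannot prove the snapshot equals the live fields and quietly turn the call back into reads of the shared struct. A stand-alone sketch of the same pattern, with hypothetical names and a volatile qualifier that the original code does not use:

#include <stdlib.h>

/* A shared descriptor whose counters are read without holding any lock;
readers may therefore observe inconsistent or changing values. */
struct hint {
	unsigned long	n_fields;
	unsigned long	n_bytes;
	unsigned long	side;
};

/* Analogous to btr_search_this_is_zero: keeps the compiler from proving
that the snapshot equals the live fields and re-reading them through the
original pointer; volatile is an extra safeguard added here. */
static volatile unsigned long sketch_zero = 0;

static void build_index_sketch(unsigned long n_fields, unsigned long n_bytes,
			       unsigned long side)
{
	/* The callee must re-validate the values, since they were read
	without synchronization. */
	if (n_fields + n_bytes == 0 || side > 1) {
		return;
	}
	/* ... build the index ... */
}

void snapshot_and_build(struct hint *h)
{
	unsigned long*	params;
	unsigned long*	params2;

	params = malloc(3 * sizeof(unsigned long));
	if (params == NULL) {
		return;
	}
	params[0] = h->n_fields;	/* take one snapshot of each field */
	params[1] = h->n_bytes;
	params[2] = h->side;

	params2 = params + sketch_zero;	/* defeat constant propagation */

	build_index_sketch(params2[0], params2[1], params2[2]);
	free(params);
}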
@ -974,16 +1012,20 @@ btr_search_drop_page_hash_when_freed(
|
||||
|
||||
/************************************************************************
|
||||
Builds a hash index on a page with the given parameters. If the page already
|
||||
has a hash index with different parameters, the old hash index is removed. */
|
||||
has a hash index with different parameters, the old hash index is removed.
|
||||
If index is non-NULL, this function checks if n_fields and n_bytes are
|
||||
sensible values, and does not build a hash index if not. */
|
||||
static
|
||||
void
|
||||
btr_search_build_page_hash_index(
|
||||
/*=============================*/
|
||||
page_t* page, /* in: index page, s- or x-latched */
|
||||
ulint n_fields, /* in: hash this many full fields */
|
||||
ulint n_bytes, /* in: hash this many bytes from the next
|
||||
dict_index_t* index, /* in: index for which to build, or NULL if
|
||||
not known */
|
||||
page_t* page, /* in: index page, s- or x-latched */
|
||||
ulint n_fields,/* in: hash this many full fields */
|
||||
ulint n_bytes,/* in: hash this many bytes from the next
|
||||
field */
|
||||
ulint side) /* in: hash for searches from this side */
|
||||
ulint side) /* in: hash for searches from this side */
|
||||
{
|
||||
hash_table_t* table;
|
||||
buf_block_t* block;
|
||||
@ -1026,9 +1068,17 @@ btr_search_build_page_hash_index(
|
||||
return;
|
||||
}
|
||||
|
||||
/* Check that the values for hash index build are sensible */
|
||||
|
||||
if (n_fields + n_bytes == 0) {
|
||||
|
||||
return;
|
||||
return;
|
||||
}
|
||||
|
||||
if (index && (dict_index_get_n_unique_in_tree(index) < n_fields
|
||||
|| (dict_index_get_n_unique_in_tree(index) == n_fields
|
||||
&& n_bytes > 0))) {
|
||||
return;
|
||||
}
|
||||
|
||||
/* Calculate and cache fold values and corresponding records into
|
||||
@ -1187,8 +1237,8 @@ btr_search_move_or_delete_hash_entries(
|
||||
|
||||
ut_a(n_fields + n_bytes > 0);
|
||||
|
||||
btr_search_build_page_hash_index(new_page, n_fields, n_bytes,
|
||||
side);
|
||||
btr_search_build_page_hash_index(NULL, new_page, n_fields,
|
||||
n_bytes, side);
|
||||
ut_a(n_fields == block->curr_n_fields);
|
||||
ut_a(n_bytes == block->curr_n_bytes);
|
||||
ut_a(side == block->curr_side);
|
||||
|
@ -1145,23 +1145,25 @@ dict_index_add_to_cache(
|
||||
}
|
||||
|
||||
/* Check that the same column does not appear twice in the index.
|
||||
InnoDB assumes this in its algorithms, e.g., update of an index
|
||||
entry */
|
||||
InnoDB assumes this in its algorithms, e.g., update of an index
|
||||
entry */
|
||||
|
||||
for (i = 0; i < dict_index_get_n_fields(index); i++) {
|
||||
|
||||
for (j = 0; j < i; j++) {
|
||||
if (dict_index_get_nth_field(index, j)->col
|
||||
== dict_index_get_nth_field(index, i)->col) {
|
||||
for (j = 0; j < i; j++) {
|
||||
if (dict_index_get_nth_field(index, j)->col
|
||||
== dict_index_get_nth_field(index, i)->col) {
|
||||
|
||||
fprintf(stderr,
|
||||
"InnoDB: Error: column %s appears twice in index %s of table %s\n"
|
||||
ut_print_timestamp(stderr);
|
||||
|
||||
fprintf(stderr,
|
||||
" InnoDB: Error: column %s appears twice in index %s of table %s\n"
|
||||
"InnoDB: This is not allowed in InnoDB.\n"
|
||||
"InnoDB: UPDATE can cause such an index to become corrupt in InnoDB.\n",
|
||||
dict_index_get_nth_field(index, i)->col->name,
|
||||
index->name, table->name);
|
||||
}
|
||||
}
|
||||
dict_index_get_nth_field(index, i)->col->name,
|
||||
index->name, table->name);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* Build the cache internal representation of the index,
|
||||
@ -2233,6 +2235,9 @@ dict_create_foreign_constraints(
|
||||
ulint error;
|
||||
ulint i;
|
||||
ulint j;
|
||||
ibool is_on_delete;
|
||||
ulint n_on_deletes;
|
||||
ulint n_on_updates;
|
||||
dict_col_t* columns[500];
|
||||
char* column_names[500];
|
||||
ulint column_name_lens[500];
|
||||
@ -2392,6 +2397,12 @@ col_loop2:
|
||||
return(DB_CANNOT_ADD_CONSTRAINT);
|
||||
}
|
||||
|
||||
n_on_deletes = 0;
|
||||
n_on_updates = 0;
|
||||
|
||||
scan_on_conditions:
|
||||
/* Loop here as long as we can find ON ... conditions */
|
||||
|
||||
ptr = dict_accept(ptr, "ON", &success);
|
||||
|
||||
if (!success) {
|
||||
@ -2402,23 +2413,58 @@ col_loop2:
|
||||
ptr = dict_accept(ptr, "DELETE", &success);
|
||||
|
||||
if (!success) {
|
||||
dict_foreign_free(foreign);
|
||||
ptr = dict_accept(ptr, "UPDATE", &success);
|
||||
|
||||
return(DB_CANNOT_ADD_CONSTRAINT);
|
||||
if (!success) {
|
||||
|
||||
dict_foreign_free(foreign);
|
||||
|
||||
return(DB_CANNOT_ADD_CONSTRAINT);
|
||||
}
|
||||
|
||||
is_on_delete = FALSE;
|
||||
n_on_updates++;
|
||||
} else {
|
||||
is_on_delete = TRUE;
|
||||
n_on_deletes++;
|
||||
}
|
||||
|
||||
ptr = dict_accept(ptr, "RESTRICT", &success);
|
||||
|
||||
if (success) {
|
||||
goto try_find_index;
|
||||
goto scan_on_conditions;
|
||||
}
|
||||
|
||||
ptr = dict_accept(ptr, "CASCADE", &success);
|
||||
|
||||
if (success) {
|
||||
foreign->type = DICT_FOREIGN_ON_DELETE_CASCADE;
|
||||
if (is_on_delete) {
|
||||
foreign->type |= DICT_FOREIGN_ON_DELETE_CASCADE;
|
||||
} else {
|
||||
foreign->type |= DICT_FOREIGN_ON_UPDATE_CASCADE;
|
||||
}
|
||||
|
||||
goto try_find_index;
|
||||
goto scan_on_conditions;
|
||||
}
|
||||
|
||||
ptr = dict_accept(ptr, "NO", &success);
|
||||
|
||||
if (success) {
|
||||
ptr = dict_accept(ptr, "ACTION", &success);
|
||||
|
||||
if (!success) {
|
||||
dict_foreign_free(foreign);
|
||||
|
||||
return(DB_CANNOT_ADD_CONSTRAINT);
|
||||
}
|
||||
|
||||
if (is_on_delete) {
|
||||
foreign->type |= DICT_FOREIGN_ON_DELETE_NO_ACTION;
|
||||
} else {
|
||||
foreign->type |= DICT_FOREIGN_ON_UPDATE_NO_ACTION;
|
||||
}
|
||||
|
||||
goto scan_on_conditions;
|
||||
}
|
||||
|
||||
ptr = dict_accept(ptr, "SET", &success);
|
||||
@ -2451,15 +2497,18 @@ col_loop2:
|
||||
}
|
||||
}
|
||||
|
||||
foreign->type = DICT_FOREIGN_ON_DELETE_SET_NULL;
|
||||
if (is_on_delete) {
|
||||
foreign->type |= DICT_FOREIGN_ON_DELETE_SET_NULL;
|
||||
} else {
|
||||
foreign->type |= DICT_FOREIGN_ON_UPDATE_SET_NULL;
|
||||
}
|
||||
|
||||
goto scan_on_conditions;
|
||||
|
||||
try_find_index:
|
||||
/* We check that there are no superfluous words like 'ON UPDATE ...'
|
||||
which we do not support yet. */
|
||||
if (n_on_deletes > 1 || n_on_updates > 1) {
|
||||
/* It is an error to define more than 1 action */
|
||||
|
||||
ptr = dict_accept(ptr, (char *) "ON", &success);
|
||||
|
||||
if (success) {
|
||||
dict_foreign_free(foreign);
|
||||
|
||||
return(DB_CANNOT_ADD_CONSTRAINT);
|
||||
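The clause scanning above boils down to: repeatedly accept "ON", then "DELETE" or "UPDATE", then one action keyword, OR the matching bit into foreign->type, and loop; RESTRICT sets no bit because an enforced constraint is the default. A condensed, self-contained sketch of that control flow (hypothetical helper names, not the dict0dict.c API, only a subset of the actions, and dict_accept-style prefix matching):

#include <ctype.h>
#include <string.h>
#include <strings.h>	/* strncasecmp (POSIX) */

enum {
	FK_ON_DELETE_CASCADE  = 1,
	FK_ON_DELETE_SET_NULL = 2,
	FK_ON_UPDATE_CASCADE  = 4,
	FK_ON_UPDATE_SET_NULL = 8
};

/* Accept one keyword after optional spaces; prefix match only, which is
enough for a sketch of the dict_accept() style of scanning. */
static const char *accept_kw(const char *p, const char *kw, int *ok)
{
	size_t	n = strlen(kw);

	while (isspace((unsigned char)*p)) {
		p++;
	}
	*ok = (strncasecmp(p, kw, n) == 0);
	return *ok ? p + n : p;
}

/* Parse zero or more "ON DELETE|UPDATE RESTRICT|CASCADE|SET NULL" clauses
into a flag word; returns -1 on a malformed clause, else 0. */
int parse_on_clauses(const char *p, unsigned *type)
{
	int	ok;
	int	is_delete;

	*type = 0;
	for (;;) {
		p = accept_kw(p, "ON", &ok);
		if (!ok) {
			return 0;	/* no more ON ... clauses */
		}
		p = accept_kw(p, "DELETE", &ok);
		is_delete = ok;
		if (!ok) {
			p = accept_kw(p, "UPDATE", &ok);
			if (!ok) {
				return -1;
			}
		}
		p = accept_kw(p, "RESTRICT", &ok);
		if (ok) {
			continue;	/* enforced constraint: no flag needed */
		}
		p = accept_kw(p, "CASCADE", &ok);
		if (ok) {
			*type |= is_delete ? FK_ON_DELETE_CASCADE
					   : FK_ON_UPDATE_CASCADE;
			continue;
		}
		p = accept_kw(p, "SET", &ok);
		if (ok) {
			p = accept_kw(p, "NULL", &ok);
			if (!ok) {
				return -1;
			}
			*type |= is_delete ? FK_ON_DELETE_SET_NULL
					   : FK_ON_UPDATE_SET_NULL;
			continue;
		}
		return -1;	/* NO ACTION etc. omitted for brevity */
	}
}

NO ACTION and the duplicate-clause bookkeeping (n_on_deletes/n_on_updates) in the real parser follow the same shape.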
@ -3286,7 +3335,8 @@ dict_print_info_on_foreign_keys_in_create_format(
|
||||
/*=============================================*/
|
||||
char* buf, /* in: auxiliary buffer */
|
||||
char* str, /* in/out: pointer to a string */
|
||||
ulint len, /* in: space in str available for info */
|
||||
ulint len, /* in: str has to be a buffer at least
|
||||
len + 5000 bytes */
|
||||
dict_table_t* table) /* in: table */
|
||||
{
|
||||
|
||||
@ -3356,14 +3406,30 @@ dict_print_info_on_foreign_keys_in_create_format(
|
||||
|
||||
buf2 += sprintf(buf2, ")");
|
||||
|
||||
if (foreign->type == DICT_FOREIGN_ON_DELETE_CASCADE) {
|
||||
if (foreign->type & DICT_FOREIGN_ON_DELETE_CASCADE) {
|
||||
buf2 += sprintf(buf2, " ON DELETE CASCADE");
|
||||
}
|
||||
|
||||
if (foreign->type == DICT_FOREIGN_ON_DELETE_SET_NULL) {
|
||||
if (foreign->type & DICT_FOREIGN_ON_DELETE_SET_NULL) {
|
||||
buf2 += sprintf(buf2, " ON DELETE SET NULL");
|
||||
}
|
||||
|
||||
if (foreign->type & DICT_FOREIGN_ON_DELETE_NO_ACTION) {
|
||||
buf2 += sprintf(buf2, " ON DELETE NO ACTION");
|
||||
}
|
||||
|
||||
if (foreign->type & DICT_FOREIGN_ON_UPDATE_CASCADE) {
|
||||
buf2 += sprintf(buf2, " ON UPDATE CASCADE");
|
||||
}
|
||||
|
||||
if (foreign->type & DICT_FOREIGN_ON_UPDATE_SET_NULL) {
|
||||
buf2 += sprintf(buf2, " ON UPDATE SET NULL");
|
||||
}
|
||||
|
||||
if (foreign->type & DICT_FOREIGN_ON_UPDATE_NO_ACTION) {
|
||||
buf2 += sprintf(buf2, " ON UPDATE NO ACTION");
|
||||
}
|
||||
|
||||
foreign = UT_LIST_GET_NEXT(foreign_list, foreign);
|
||||
}
|
||||
no_space:
|
||||
@ -3455,6 +3521,22 @@ dict_print_info_on_foreign_keys(
|
||||
buf2 += sprintf(buf2, " ON DELETE SET NULL");
|
||||
}
|
||||
|
||||
if (foreign->type & DICT_FOREIGN_ON_DELETE_NO_ACTION) {
|
||||
buf2 += sprintf(buf2, " ON DELETE NO ACTION");
|
||||
}
|
||||
|
||||
if (foreign->type & DICT_FOREIGN_ON_UPDATE_CASCADE) {
|
||||
buf2 += sprintf(buf2, " ON UPDATE CASCADE");
|
||||
}
|
||||
|
||||
if (foreign->type & DICT_FOREIGN_ON_UPDATE_SET_NULL) {
|
||||
buf2 += sprintf(buf2, " ON UPDATE SET NULL");
|
||||
}
|
||||
|
||||
if (foreign->type & DICT_FOREIGN_ON_UPDATE_NO_ACTION) {
|
||||
buf2 += sprintf(buf2, " ON UPDATE NO ACTION");
|
||||
}
|
||||
|
||||
foreign = UT_LIST_GET_NEXT(foreign_list, foreign);
|
||||
}
|
||||
no_space:
|
||||
|
@@ -2479,20 +2479,20 @@ try_again:
	n_free = n_free_list_ext + n_free_up;

	if (alloc_type == FSP_NORMAL) {
		/* We reserve 1 extent + 4 % of the space size to undo logs
		and 1 extent + 1 % to cleaning operations; NOTE: this source
		/* We reserve 1 extent + 0.5 % of the space size to undo logs
		and 1 extent + 0.5 % to cleaning operations; NOTE: this source
		code is duplicated in the function below! */

		reserve = 2 + ((size / FSP_EXTENT_SIZE) * 5) / 100;
		reserve = 2 + ((size / FSP_EXTENT_SIZE) * 2) / 200;

		if (n_free <= reserve + n_ext) {

			goto try_to_extend;
		}
	} else if (alloc_type == FSP_UNDO) {
		/* We reserve 1 % of the space size to cleaning operations */
		/* We reserve 0.5 % of the space size to cleaning operations */

		reserve = 1 + ((size / FSP_EXTENT_SIZE) * 1) / 100;
		reserve = 1 + ((size / FSP_EXTENT_SIZE) * 1) / 200;

		if (n_free <= reserve + n_ext) {

@@ -2572,11 +2572,11 @@ fsp_get_available_space_in_free_extents(

	n_free = n_free_list_ext + n_free_up;

	/* We reserve 1 extent + 4 % of the space size to undo logs
	and 1 extent + 1 % to cleaning operations; NOTE: this source
	/* We reserve 1 extent + 0.5 % of the space size to undo logs
	and 1 extent + 0.5 % to cleaning operations; NOTE: this source
	code is duplicated in the function above! */

	reserve = 2 + ((size / FSP_EXTENT_SIZE) * 5) / 100;
	reserve = 2 + ((size / FSP_EXTENT_SIZE) * 2) / 200;

	if (reserve > n_free) {
		return(0);
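To put numbers on the changed reservation: assuming the standard 16 kB page and a 64-page extent (FSP_EXTENT_SIZE), a roughly 10 GB tablespace holds 10240 extents, so the old formula reserved 2 + (10240 * 5) / 100 = 514 extents (about 5 %) while the new one reserves 2 + (10240 * 2) / 200 = 104 extents (about 1 %), matching the reworded 0.5 % + 0.5 % comment. A throwaway check with those assumed sizes:

#include <stdio.h>

int main(void)
{
	unsigned long	size = 655360UL;	/* pages in a ~10 GB space at 16 kB/page */
	unsigned long	extent = 64;		/* assumed FSP_EXTENT_SIZE */
	unsigned long	old_reserve = 2 + ((size / extent) * 5) / 100;
	unsigned long	new_reserve = 2 + ((size / extent) * 2) / 200;

	/* prints: extents=10240 old=514 new=104 */
	printf("extents=%lu old=%lu new=%lu\n",
		size / extent, old_reserve, new_reserve);
	return 0;
}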
@ -2658,9 +2658,6 @@ reset_bit:
|
||||
}
|
||||
}
|
||||
|
||||
ibuf_data->n_merges++;
|
||||
ibuf_data->n_merged_recs += n_inserts;
|
||||
|
||||
#ifdef UNIV_IBUF_DEBUG
|
||||
/* printf("Ibuf merge %lu records volume %lu to page no %lu\n",
|
||||
n_inserts, volume, page_no); */
|
||||
@ -2670,6 +2667,14 @@ reset_bit:
|
||||
|
||||
mem_heap_free(heap);
|
||||
|
||||
/* Protect our statistics keeping from race conditions */
|
||||
mutex_enter(&ibuf_mutex);
|
||||
|
||||
ibuf_data->n_merges++;
|
||||
ibuf_data->n_merged_recs += n_inserts;
|
||||
|
||||
mutex_exit(&ibuf_mutex);
|
||||
|
||||
ibuf_exit();
|
||||
#ifdef UNIV_IBUF_DEBUG
|
||||
ut_a(ibuf_count_get(space, page_no) == 0);
|
||||
|
@ -728,8 +728,8 @@ struct buf_block_struct{
|
||||
bufferfixed, or (2) the thread has an
|
||||
x-latch on the block */
|
||||
|
||||
/* 5. Hash search fields: NOTE that these fields are protected by
|
||||
btr_search_mutex */
|
||||
/* 5. Hash search fields: NOTE that the first 4 fields are NOT
|
||||
protected by any semaphore! */
|
||||
|
||||
ulint n_hash_helps; /* counter which controls building
|
||||
of a new hash index for the page */
|
||||
@ -742,6 +742,9 @@ struct buf_block_struct{
|
||||
whether the leftmost record of several
|
||||
records with the same prefix should be
|
||||
indexed in the hash index */
|
||||
|
||||
/* The following 4 fields are protected by btr_search_latch: */
|
||||
|
||||
ibool is_hashed; /* TRUE if hash index has already been
|
||||
built on this page; note that it does
|
||||
not guarantee that the index is
|
||||
|
@ -42,7 +42,8 @@ Created 5/24/1996 Heikki Tuuri
|
||||
#define DB_CANNOT_ADD_CONSTRAINT 38 /* adding a foreign key constraint
|
||||
to a table failed */
|
||||
#define DB_CORRUPTION 39 /* data structure corruption noticed */
|
||||
#define DB_COL_APPEARS_TWICE_IN_INDEX 40
|
||||
#define DB_COL_APPEARS_TWICE_IN_INDEX 40 /* InnoDB cannot handle an index
|
||||
where same column appears twice */
|
||||
|
||||
/* The following are partial failure codes */
|
||||
#define DB_FAIL 1000
|
||||
|
@@ -280,8 +280,15 @@ struct dict_foreign_struct{
				table */
};

/* The flags for ON_UPDATE and ON_DELETE can be ORed; the default is that
a foreign key constraint is enforced, therefore RESTRICT just means no flag */
#define DICT_FOREIGN_ON_DELETE_CASCADE	1
#define DICT_FOREIGN_ON_DELETE_SET_NULL	2
#define DICT_FOREIGN_ON_UPDATE_CASCADE	4
#define DICT_FOREIGN_ON_UPDATE_SET_NULL	8
#define DICT_FOREIGN_ON_DELETE_NO_ACTION 16
#define DICT_FOREIGN_ON_UPDATE_NO_ACTION 32


#define DICT_INDEX_MAGIC_N	76789786

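Because each referential action now has its own bit, a single dict_foreign_struct can carry both an ON DELETE and an ON UPDATE action, which is why the printing code earlier in this commit switches from == comparisons to & tests on foreign->type. A minimal illustration using the same constants:

#include <stdio.h>

#define DICT_FOREIGN_ON_DELETE_CASCADE	 1
#define DICT_FOREIGN_ON_DELETE_SET_NULL	 2
#define DICT_FOREIGN_ON_UPDATE_CASCADE	 4
#define DICT_FOREIGN_ON_UPDATE_SET_NULL	 8
#define DICT_FOREIGN_ON_DELETE_NO_ACTION 16
#define DICT_FOREIGN_ON_UPDATE_NO_ACTION 32

int main(void)
{
	/* e.g. FOREIGN KEY ... ON DELETE SET NULL ON UPDATE CASCADE */
	unsigned type = DICT_FOREIGN_ON_DELETE_SET_NULL
			| DICT_FOREIGN_ON_UPDATE_CASCADE;

	/* An equality test only matches single-action values and misses
	combined ones; a bit test does not. */
	printf("== test: %d\n", type == DICT_FOREIGN_ON_DELETE_SET_NULL);	  /* 0 */
	printf("&  test: %d\n", (type & DICT_FOREIGN_ON_DELETE_SET_NULL) != 0);  /* 1 */
	return 0;
}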
@ -127,16 +127,18 @@ mem_heap_create_func(
|
||||
ulint line /* in: line where created */
|
||||
);
|
||||
/*********************************************************************
|
||||
NOTE: Use the corresponding macro instead of this function.
|
||||
Frees the space occupied by a memory heap. */
|
||||
NOTE: Use the corresponding macro instead of this function. Frees the space
|
||||
occupied by a memory heap. In the debug version erases the heap memory
|
||||
blocks. */
|
||||
UNIV_INLINE
|
||||
void
|
||||
mem_heap_free_func(
|
||||
/*===============*/
|
||||
mem_heap_t* heap, /* in, own: heap to be freed */
|
||||
char* file_name, /* in: file name where freed */
|
||||
ulint line /* in: line where freed */
|
||||
);
|
||||
mem_heap_t* heap, /* in, own: heap to be freed */
|
||||
char* file_name __attribute__((unused)),
|
||||
/* in: file name where freed */
|
||||
ulint line __attribute__((unused)));
|
||||
/* in: line where freed */
|
||||
/*******************************************************************
|
||||
Allocates n bytes of memory from a memory heap. */
|
||||
UNIV_INLINE
|
||||
|
@ -440,9 +440,10 @@ void
|
||||
mem_heap_free_func(
|
||||
/*===============*/
|
||||
mem_heap_t* heap, /* in, own: heap to be freed */
|
||||
char* file_name, /* in: file name where freed */
|
||||
ulint line /* in: line where freed */
|
||||
)
|
||||
char* file_name __attribute__((unused)),
|
||||
/* in: file name where freed */
|
||||
ulint line __attribute__((unused)))
|
||||
/* in: line where freed */
|
||||
{
|
||||
mem_block_t* block;
|
||||
mem_block_t* prev_block;
|
||||
|
@ -492,7 +492,11 @@ struct row_prebuilt_struct {
|
||||
fetch many rows from the same cursor:
|
||||
it saves CPU time to fetch them in a
|
||||
batch; we reserve mysql_row_len
|
||||
bytes for each such row */
|
||||
bytes for each such row; these
|
||||
pointers point 4 bytes past the
|
||||
allocated mem buf start, because
|
||||
there is a 4 byte magic number at the
|
||||
start and at the end */
|
||||
ulint fetch_cache_first;/* position of the first not yet
|
||||
fetched row in fetch_cache */
|
||||
ulint n_fetch_cached; /* number of not yet fetched rows
|
||||
@ -501,8 +505,12 @@ struct row_prebuilt_struct {
|
||||
to this heap */
|
||||
mem_heap_t* old_vers_heap; /* memory heap where a previous
|
||||
version is built in consistent read */
|
||||
ulint magic_n2; /* this should be the same as
|
||||
magic_n */
|
||||
};
|
||||
|
||||
#define ROW_PREBUILT_FETCH_MAGIC_N 465765687
|
||||
|
||||
#define ROW_MYSQL_WHOLE_ROW 0
|
||||
#define ROW_MYSQL_REC_FIELDS 1
|
||||
#define ROW_MYSQL_NO_TEMPLATE 2
|
||||
|
@ -312,8 +312,11 @@ struct upd_node_struct{
|
||||
ibool in_mysql_interface;
|
||||
/* TRUE if the update node was created
|
||||
for the MySQL interface */
|
||||
dict_foreign_t* foreign;/* NULL or pointer to a foreign key
|
||||
constraint if this update node is used in
|
||||
doing an ON DELETE or ON UPDATE operation */
|
||||
upd_node_t* cascade_node;/* NULL or an update node template which
|
||||
is used to implement ON DELETE CASCADE
|
||||
is used to implement ON DELETE/UPDATE CASCADE
|
||||
or ... SET NULL for foreign keys */
|
||||
mem_heap_t* cascade_heap;/* NULL or a mem heap where the cascade
|
||||
node is created */
|
||||
|
@ -15,6 +15,7 @@ Created 5/12/1997 Heikki Tuuri
|
||||
#include "ut0mem.h"
|
||||
#include "ut0lst.h"
|
||||
#include "ut0byte.h"
|
||||
#include "mem0mem.h"
|
||||
|
||||
/* We would like to use also the buffer frames to allocate memory. This
|
||||
would be desirable, because then the memory consumption of the database
|
||||
@ -251,7 +252,6 @@ mem_pool_fill_free_list(
|
||||
mem_area_t* area;
|
||||
mem_area_t* area2;
|
||||
ibool ret;
|
||||
char err_buf[500];
|
||||
|
||||
ut_ad(mutex_own(&(pool->mutex)));
|
||||
|
||||
@ -300,11 +300,8 @@ mem_pool_fill_free_list(
|
||||
}
|
||||
|
||||
if (UT_LIST_GET_LEN(pool->free_list[i + 1]) == 0) {
|
||||
ut_sprintf_buf(err_buf, ((byte*)area) - 50, 100);
|
||||
fprintf(stderr,
|
||||
"InnoDB: Error: Removing element from mem pool free list %lu\n"
|
||||
"InnoDB: though the list length is 0! Dump of 100 bytes around element:\n%s\n",
|
||||
i + 1, err_buf);
|
||||
mem_analyze_corruption((byte*)area);
|
||||
|
||||
ut_a(0);
|
||||
}
|
||||
|
||||
@ -340,7 +337,6 @@ mem_area_alloc(
|
||||
mem_area_t* area;
|
||||
ulint n;
|
||||
ibool ret;
|
||||
char err_buf[500];
|
||||
|
||||
n = ut_2_log(ut_max(size + MEM_AREA_EXTRA_SIZE, MEM_AREA_MIN_SIZE));
|
||||
|
||||
@ -364,20 +360,22 @@ mem_area_alloc(
|
||||
}
|
||||
|
||||
if (!mem_area_get_free(area)) {
|
||||
ut_sprintf_buf(err_buf, ((byte*)area) - 50, 100);
|
||||
fprintf(stderr,
|
||||
"InnoDB: Error: Removing element from mem pool free list %lu though the\n"
|
||||
"InnoDB: element is not marked free! Dump of 100 bytes around element:\n%s\n",
|
||||
n, err_buf);
|
||||
"InnoDB: element is not marked free!\n",
|
||||
n);
|
||||
|
||||
mem_analyze_corruption((byte*)area);
|
||||
ut_a(0);
|
||||
}
|
||||
|
||||
if (UT_LIST_GET_LEN(pool->free_list[n]) == 0) {
|
||||
ut_sprintf_buf(err_buf, ((byte*)area) - 50, 100);
|
||||
fprintf(stderr,
|
||||
"InnoDB: Error: Removing element from mem pool free list %lu\n"
|
||||
"InnoDB: though the list length is 0! Dump of 100 bytes around element:\n%s\n",
|
||||
n, err_buf);
|
||||
"InnoDB: though the list length is 0!\n",
|
||||
n);
|
||||
mem_analyze_corruption((byte*)area);
|
||||
|
||||
ut_a(0);
|
||||
}
|
||||
|
||||
@ -451,7 +449,6 @@ mem_area_free(
|
||||
void* new_ptr;
|
||||
ulint size;
|
||||
ulint n;
|
||||
char err_buf[500];
|
||||
|
||||
if (mem_out_of_mem_err_msg_count > 0) {
|
||||
/* It may be that the area was really allocated from the
|
||||
@ -468,18 +465,25 @@ mem_area_free(
|
||||
|
||||
area = (mem_area_t*) (((byte*)ptr) - MEM_AREA_EXTRA_SIZE);
|
||||
|
||||
if (mem_area_get_free(area)) {
|
||||
ut_sprintf_buf(err_buf, ((byte*)area) - 50, 100);
|
||||
if (mem_area_get_free(area)) {
|
||||
fprintf(stderr,
|
||||
"InnoDB: Error: Freeing element to mem pool free list though the\n"
|
||||
"InnoDB: element is marked free! Dump of 100 bytes around element:\n%s\n",
|
||||
err_buf);
|
||||
"InnoDB: element is marked free!\n");
|
||||
|
||||
mem_analyze_corruption((byte*)area);
|
||||
ut_a(0);
|
||||
}
|
||||
|
||||
size = mem_area_get_size(area);
|
||||
|
||||
ut_ad(size != 0);
|
||||
if (size == 0) {
|
||||
fprintf(stderr,
|
||||
"InnoDB: Error: Mem area size is 0. Possibly a memory overrun of the\n"
|
||||
"InnoDB: previous allocated area!\n");
|
||||
|
||||
mem_analyze_corruption((byte*)area);
|
||||
ut_a(0);
|
||||
}
|
||||
|
||||
#ifdef UNIV_LIGHT_MEM_DEBUG
|
||||
if (((byte*)area) + size < pool->buf + pool->size) {
|
||||
@ -488,7 +492,15 @@ mem_area_free(
|
||||
|
||||
next_size = mem_area_get_size(
|
||||
(mem_area_t*)(((byte*)area) + size));
|
||||
ut_a(ut_2_power_up(next_size) == next_size);
|
||||
if (ut_2_power_up(next_size) != next_size) {
|
||||
fprintf(stderr,
|
||||
"InnoDB: Error: Memory area size %lu, next area size %lu not a power of 2!\n"
|
||||
"InnoDB: Possibly a memory overrun of the buffer being freed here.\n",
|
||||
size, next_size);
|
||||
mem_analyze_corruption((byte*)area);
|
||||
|
||||
ut_a(0);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
buddy = mem_area_get_buddy(area, size, pool);
|
||||
|
@ -322,13 +322,129 @@ row_ins_clust_index_entry_by_modify(
|
||||
}
|
||||
|
||||
/*************************************************************************
|
||||
Either deletes or sets the referencing columns SQL NULL in a child row.
|
||||
Used in ON DELETE ... clause for foreign keys when a parent row is
|
||||
deleted. */
|
||||
Returns TRUE if in a cascaded update/delete an ancestor node of node
|
||||
updates table. */
|
||||
static
|
||||
ibool
|
||||
row_ins_cascade_ancestor_updates_table(
|
||||
/*===================================*/
|
||||
/* out: TRUE if an ancestor updates table */
|
||||
que_node_t* node, /* in: node in a query graph */
|
||||
dict_table_t* table) /* in: table */
|
||||
{
|
||||
que_node_t* parent;
|
||||
upd_node_t* upd_node;
|
||||
|
||||
parent = que_node_get_parent(node);
|
||||
|
||||
while (que_node_get_type(parent) == QUE_NODE_UPDATE) {
|
||||
|
||||
upd_node = parent;
|
||||
|
||||
if (upd_node->table == table) {
|
||||
|
||||
return(TRUE);
|
||||
}
|
||||
|
||||
parent = que_node_get_parent(parent);
|
||||
|
||||
ut_a(parent);
|
||||
}
|
||||
|
||||
return(FALSE);
|
||||
}
|
||||
|
||||
/**********************************************************************
|
||||
Calculates the update vector node->cascade->update for a child table in
|
||||
a cascaded update. */
|
||||
static
|
||||
ulint
|
||||
row_ins_foreign_delete_or_set_null(
|
||||
/*===============================*/
|
||||
row_ins_cascade_calc_update_vec(
|
||||
/*============================*/
|
||||
/* out: number of fields in the
|
||||
calculated update vector; the value
|
||||
can also be 0 if no foreign key
|
||||
fields changed */
|
||||
upd_node_t* node, /* in: update node of the parent
|
||||
table */
|
||||
dict_foreign_t* foreign) /* in: foreign key constraint whose
|
||||
type is != 0 */
|
||||
{
|
||||
upd_node_t* cascade = node->cascade_node;
|
||||
dict_table_t* table = foreign->foreign_table;
|
||||
dict_index_t* index = foreign->foreign_index;
|
||||
upd_t* update;
|
||||
upd_field_t* ufield;
|
||||
dict_table_t* parent_table;
|
||||
dict_index_t* parent_index;
|
||||
upd_t* parent_update;
|
||||
upd_field_t* parent_ufield;
|
||||
ulint n_fields_updated;
|
||||
ulint parent_field_no;
|
||||
ulint i;
|
||||
ulint j;
|
||||
|
||||
ut_a(node && foreign && cascade && table && index);
|
||||
|
||||
/* Calculate the appropriate update vector which will set the fields
|
||||
in the child index record to the same value as the referenced index
|
||||
record will get in the update. */
|
||||
|
||||
parent_table = node->table;
|
||||
ut_a(parent_table == foreign->referenced_table);
|
||||
parent_index = foreign->referenced_index;
|
||||
parent_update = node->update;
|
||||
|
||||
update = cascade->update;
|
||||
|
||||
update->info_bits = 0;
|
||||
update->n_fields = foreign->n_fields;
|
||||
|
||||
n_fields_updated = 0;
|
||||
|
||||
for (i = 0; i < foreign->n_fields; i++) {
|
||||
|
||||
parent_field_no = dict_table_get_nth_col_pos(
|
||||
parent_table,
|
||||
dict_index_get_nth_col_no(
|
||||
parent_index, i));
|
||||
|
||||
for (j = 0; j < parent_update->n_fields; j++) {
|
||||
parent_ufield = parent_update->fields + j;
|
||||
|
||||
if (parent_ufield->field_no == parent_field_no) {
|
||||
|
||||
/* A field in the parent index record is
|
||||
updated. Let us make the update vector
|
||||
field for the child table. */
|
||||
|
||||
ufield = update->fields + n_fields_updated;
|
||||
|
||||
ufield->field_no =
|
||||
dict_table_get_nth_col_pos(table,
|
||||
dict_index_get_nth_col_no(index, i));
|
||||
ufield->exp = NULL;
|
||||
ufield->new_val = parent_ufield->new_val;
|
||||
ufield->extern_storage = FALSE;
|
||||
|
||||
n_fields_updated++;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
update->n_fields = n_fields_updated;
|
||||
|
||||
return(n_fields_updated);
|
||||
}
|
||||
|
||||
/*************************************************************************
|
||||
Perform referential actions or checks when a parent row is deleted or updated
|
||||
and the constraint had an ON DELETE or ON UPDATE condition which was not
|
||||
RESTRICT. */
|
||||
static
|
||||
ulint
|
||||
row_ins_foreign_check_on_constraint(
|
||||
/*================================*/
|
||||
/* out: DB_SUCCESS, DB_LOCK_WAIT,
|
||||
or error code */
|
||||
que_thr_t* thr, /* in: query thread whose run_node
|
||||
@ -378,15 +494,34 @@ row_ins_foreign_delete_or_set_null(
|
||||
ut_strlen(table->name) + 1);
|
||||
node = thr->run_node;
|
||||
|
||||
ut_a(que_node_get_type(node) == QUE_NODE_UPDATE);
|
||||
if (node->is_delete && 0 == (foreign->type &
|
||||
(DICT_FOREIGN_ON_DELETE_CASCADE
|
||||
| DICT_FOREIGN_ON_DELETE_SET_NULL))) {
|
||||
|
||||
if (!node->is_delete) {
|
||||
/* According to SQL-92 an UPDATE with respect to FOREIGN
|
||||
KEY constraints is not semantically equivalent to a
|
||||
DELETE + INSERT. Therefore we do not perform any action
|
||||
here and consequently the child rows would be left
|
||||
orphaned if we would let the UPDATE happen. Thus we return
|
||||
an error. */
|
||||
/* No action is defined: return a foreign key error if
|
||||
NO ACTION is not specified */
|
||||
|
||||
if (foreign->type & DICT_FOREIGN_ON_DELETE_NO_ACTION) {
|
||||
|
||||
return(DB_SUCCESS);
|
||||
}
|
||||
|
||||
return(DB_ROW_IS_REFERENCED);
|
||||
}
|
||||
|
||||
if (!node->is_delete && 0 == (foreign->type &
|
||||
(DICT_FOREIGN_ON_UPDATE_CASCADE
|
||||
| DICT_FOREIGN_ON_UPDATE_SET_NULL))) {
|
||||
|
||||
/* This is an UPDATE */
|
||||
|
||||
/* No action is defined: return a foreign key error if
|
||||
NO ACTION is not specified */
|
||||
|
||||
if (foreign->type & DICT_FOREIGN_ON_UPDATE_NO_ACTION) {
|
||||
|
||||
return(DB_SUCCESS);
|
||||
}
|
||||
|
||||
return(DB_ROW_IS_REFERENCED);
|
||||
}
|
||||
@ -411,7 +546,10 @@ row_ins_foreign_delete_or_set_null(
|
||||
|
||||
cascade->table = table;
|
||||
|
||||
if (foreign->type == DICT_FOREIGN_ON_DELETE_CASCADE ) {
|
||||
cascade->foreign = foreign;
|
||||
|
||||
if (node->is_delete
|
||||
&& (foreign->type & DICT_FOREIGN_ON_DELETE_CASCADE)) {
|
||||
cascade->is_delete = TRUE;
|
||||
} else {
|
||||
cascade->is_delete = FALSE;
|
||||
@ -425,8 +563,30 @@ row_ins_foreign_delete_or_set_null(
|
||||
}
|
||||
}
|
||||
|
||||
/* We do not allow cyclic cascaded updating of the same
|
||||
table. Check that we are not updating the same table which
|
||||
is already being modified in this cascade chain. We have to
|
||||
check this because the modification of the indexes of a
|
||||
'parent' table may still be incomplete, and we must avoid
|
||||
seeing the indexes of the parent table in an inconsistent
|
||||
state! In this way we also prevent possible infinite
|
||||
update loops caused by cyclic cascaded updates. */
|
||||
|
||||
if (!cascade->is_delete
|
||||
&& row_ins_cascade_ancestor_updates_table(cascade, table)) {
|
||||
|
||||
/* We do not know if this would break foreign key
|
||||
constraints, but play safe and return an error */
|
||||
|
||||
err = DB_ROW_IS_REFERENCED;
|
||||
|
||||
goto nonstandard_exit_func;
|
||||
}
|
||||
|
||||
index = btr_pcur_get_btr_cur(pcur)->index;
|
||||
|
||||
ut_a(index == foreign->foreign_index);
|
||||
|
||||
rec = btr_pcur_get_rec(pcur);
|
||||
|
||||
if (index->type & DICT_CLUSTERED) {
|
||||
@ -520,7 +680,11 @@ row_ins_foreign_delete_or_set_null(
|
||||
goto nonstandard_exit_func;
|
||||
}
|
||||
|
||||
if (foreign->type == DICT_FOREIGN_ON_DELETE_SET_NULL) {
|
||||
if ((node->is_delete
|
||||
&& (foreign->type & DICT_FOREIGN_ON_DELETE_SET_NULL))
|
||||
|| (!node->is_delete
|
||||
&& (foreign->type & DICT_FOREIGN_ON_UPDATE_SET_NULL))) {
|
||||
|
||||
/* Build the appropriate update vector which sets
|
||||
foreign->n_fields first fields in rec to SQL NULL */
|
||||
|
||||
@ -540,6 +704,26 @@ row_ins_foreign_delete_or_set_null(
|
||||
}
|
||||
}
|
||||
|
||||
if (!node->is_delete
|
||||
&& (foreign->type & DICT_FOREIGN_ON_UPDATE_CASCADE)) {
|
||||
|
||||
/* Build the appropriate update vector which sets changing
|
||||
foreign->n_fields first fields in rec to new values */
|
||||
|
||||
row_ins_cascade_calc_update_vec(node, foreign);
|
||||
|
||||
if (cascade->update->n_fields == 0) {
|
||||
|
||||
/* The update does not change any columns referred
|
||||
to in this foreign key constraint: no need to do
|
||||
anything */
|
||||
|
||||
err = DB_SUCCESS;
|
||||
|
||||
goto nonstandard_exit_func;
|
||||
}
|
||||
}
|
||||
|
||||
/* Store pcur position and initialize or store the cascade node
|
||||
pcur stored position */
|
||||
|
||||
@ -629,6 +813,7 @@ row_ins_check_foreign_constraint(
|
||||
dtuple_t* entry, /* in: index entry for index */
|
||||
que_thr_t* thr) /* in: query thread */
|
||||
{
|
||||
upd_node_t* upd_node;
|
||||
dict_table_t* check_table;
|
||||
dict_index_t* check_index;
|
||||
ulint n_fields_cmp;
|
||||
@ -665,6 +850,30 @@ run_again:
|
||||
}
|
||||
}
|
||||
|
||||
if (que_node_get_type(thr->run_node) == QUE_NODE_UPDATE) {
|
||||
upd_node = thr->run_node;
|
||||
|
||||
if (!(upd_node->is_delete) && upd_node->foreign == foreign) {
|
||||
/* If a cascaded update is done as defined by a
|
||||
foreign key constraint, do not check that
|
||||
constraint for the child row. In ON UPDATE CASCADE
|
||||
the update of the parent row is only half done when
|
||||
we come here: if we would check the constraint here
|
||||
for the child row it would fail.
|
||||
|
||||
A QUESTION remains: if in the child table there are
|
||||
several constraints which refer to the same parent
|
||||
table, we should merge all updates to the child as
|
||||
one update? And the updates can be contradictory!
|
||||
Currently we just perform the update associated
|
||||
with each foreign key constraint, one after
|
||||
another, and the user has problems predicting in
|
||||
which order they are performed. */
|
||||
|
||||
return(DB_SUCCESS);
|
||||
}
|
||||
}
|
||||
|
||||
if (check_ref) {
|
||||
check_table = foreign->referenced_table;
|
||||
check_index = foreign->referenced_index;
|
||||
@ -774,8 +983,12 @@ run_again:
|
||||
|
||||
break;
|
||||
} else if (foreign->type != 0) {
|
||||
/* There is an ON UPDATE or ON DELETE
|
||||
condition: check them in a separate
|
||||
function */
|
||||
|
||||
err =
|
||||
row_ins_foreign_delete_or_set_null(
|
||||
row_ins_foreign_check_on_constraint(
|
||||
thr, foreign, &pcur, &mtr);
|
||||
|
||||
if (err != DB_SUCCESS) {
|
||||
|
@ -313,6 +313,7 @@ row_create_prebuilt(
|
||||
prebuilt = mem_heap_alloc(heap, sizeof(row_prebuilt_t));
|
||||
|
||||
prebuilt->magic_n = ROW_PREBUILT_ALLOCATED;
|
||||
prebuilt->magic_n2 = ROW_PREBUILT_ALLOCATED;
|
||||
|
||||
prebuilt->table = table;
|
||||
|
||||
@ -378,11 +379,12 @@ row_prebuilt_free(
|
||||
{
|
||||
ulint i;
|
||||
|
||||
if (prebuilt->magic_n != ROW_PREBUILT_ALLOCATED) {
|
||||
if (prebuilt->magic_n != ROW_PREBUILT_ALLOCATED
|
||||
|| prebuilt->magic_n2 != ROW_PREBUILT_ALLOCATED) {
|
||||
fprintf(stderr,
|
||||
"InnoDB: Error: trying to free a corrupt\n"
|
||||
"InnoDB: table handle. Magic n %lu, table name %s\n",
|
||||
prebuilt->magic_n, prebuilt->table->name);
|
||||
"InnoDB: Error: trying to free a corrupt\n"
|
||||
"InnoDB: table handle. Magic n %lu, magic n2 %lu, table name %s\n",
|
||||
prebuilt->magic_n, prebuilt->magic_n2, prebuilt->table->name);
|
||||
|
||||
mem_analyze_corruption((byte*)prebuilt);
|
||||
|
||||
@ -390,6 +392,7 @@ row_prebuilt_free(
|
||||
}
|
||||
|
||||
prebuilt->magic_n = ROW_PREBUILT_FREED;
|
||||
prebuilt->magic_n2 = ROW_PREBUILT_FREED;
|
||||
|
||||
btr_pcur_free_for_mysql(prebuilt->pcur);
|
||||
btr_pcur_free_for_mysql(prebuilt->clust_pcur);
|
||||
@ -420,7 +423,23 @@ row_prebuilt_free(
|
||||
|
||||
for (i = 0; i < MYSQL_FETCH_CACHE_SIZE; i++) {
|
||||
if (prebuilt->fetch_cache[i] != NULL) {
|
||||
mem_free(prebuilt->fetch_cache[i]);
|
||||
|
||||
if ((ROW_PREBUILT_FETCH_MAGIC_N !=
|
||||
mach_read_from_4((prebuilt->fetch_cache[i]) - 4))
|
||||
|| (ROW_PREBUILT_FETCH_MAGIC_N !=
|
||||
mach_read_from_4((prebuilt->fetch_cache[i])
|
||||
+ prebuilt->mysql_row_len))) {
|
||||
fprintf(stderr,
|
||||
"InnoDB: Error: trying to free a corrupt\n"
|
||||
"InnoDB: fetch buffer.\n");
|
||||
|
||||
mem_analyze_corruption(
|
||||
prebuilt->fetch_cache[i]);
|
||||
|
||||
ut_a(0);
|
||||
}
|
||||
|
||||
mem_free((prebuilt->fetch_cache[i]) - 4);
|
||||
}
|
||||
}
|
||||
|
||||
@ -1435,7 +1454,7 @@ int
|
||||
row_create_index_for_mysql(
|
||||
/*=======================*/
|
||||
/* out: error number or DB_SUCCESS */
|
||||
dict_index_t* index, /* in: index defintion */
|
||||
dict_index_t* index, /* in: index definition */
|
||||
trx_t* trx) /* in: transaction handle */
|
||||
{
|
||||
ind_node_t* node;
|
||||
@ -1444,6 +1463,8 @@ row_create_index_for_mysql(
|
||||
ulint namelen;
|
||||
ulint keywordlen;
|
||||
ulint err;
|
||||
ulint i;
|
||||
ulint j;
|
||||
|
||||
ut_ad(rw_lock_own(&dict_operation_lock, RW_LOCK_EX));
|
||||
ut_ad(mutex_own(&(dict_sys->mutex)));
|
||||
@ -1465,6 +1486,31 @@ row_create_index_for_mysql(
|
||||
return(DB_SUCCESS);
|
||||
}
|
||||
|
||||
/* Check that the same column does not appear twice in the index.
|
||||
InnoDB assumes this in its algorithms, e.g., update of an index
|
||||
entry */
|
||||
|
||||
for (i = 0; i < dict_index_get_n_fields(index); i++) {
|
||||
for (j = 0; j < i; j++) {
|
||||
if (0 == ut_strcmp(
|
||||
dict_index_get_nth_field(index, j)->name,
|
||||
dict_index_get_nth_field(index, i)->name)) {
|
||||
|
||||
ut_print_timestamp(stderr);
|
||||
|
||||
fprintf(stderr,
|
||||
" InnoDB: Error: column %s appears twice in index %s of table %s\n"
|
||||
"InnoDB: This is not allowed in InnoDB.\n",
|
||||
dict_index_get_nth_field(index, i)->name,
|
||||
index->name, index->table_name);
|
||||
|
||||
err = DB_COL_APPEARS_TWICE_IN_INDEX;
|
||||
|
||||
goto error_handling;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
heap = mem_heap_create(512);
|
||||
|
||||
trx->dict_operation = TRUE;
|
||||
@ -1477,11 +1523,13 @@ row_create_index_for_mysql(
|
||||
SESS_COMM_EXECUTE, 0));
|
||||
que_run_threads(thr);
|
||||
|
||||
err = trx->error_state;
|
||||
err = trx->error_state;
|
||||
|
||||
que_graph_free((que_t*) que_node_get_parent(thr));
|
||||
|
||||
error_handling:
|
||||
if (err != DB_SUCCESS) {
|
||||
/* We have special error handling here */
|
||||
ut_a(err == DB_OUT_OF_FILE_SPACE);
|
||||
|
||||
trx->error_state = DB_SUCCESS;
|
||||
|
||||
@ -1492,8 +1540,6 @@ row_create_index_for_mysql(
|
||||
trx->error_state = DB_SUCCESS;
|
||||
}
|
||||
|
||||
que_graph_free((que_t*) que_node_get_parent(thr));
|
||||
|
||||
trx->op_info = (char *) "";
|
||||
|
||||
return((int) err);
|
||||
|
@@ -2415,6 +2415,7 @@ row_sel_push_cache_row_for_mysql(
	row_prebuilt_t*	prebuilt,	/* in: prebuilt struct */
	rec_t*		rec)		/* in: record to push */
{
	byte*	buf;
	ulint	i;

	ut_ad(prebuilt->n_fetch_cached < MYSQL_FETCH_CACHE_SIZE);
@@ -2424,8 +2425,18 @@ row_sel_push_cache_row_for_mysql(
		/* Allocate memory for the fetch cache */

		for (i = 0; i < MYSQL_FETCH_CACHE_SIZE; i++) {
			prebuilt->fetch_cache[i] = mem_alloc(
						prebuilt->mysql_row_len);

			/* A user has reported memory corruption in these
			buffers in Linux. Put magic numbers there to help
			to track a possible bug. */

			buf = mem_alloc(prebuilt->mysql_row_len + 8);

			prebuilt->fetch_cache[i] = buf + 4;

			mach_write_to_4(buf, ROW_PREBUILT_FETCH_MAGIC_N);
			mach_write_to_4(buf + 4 + prebuilt->mysql_row_len,
						ROW_PREBUILT_FETCH_MAGIC_N);
		}
	}

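The guard-word idea used for the fetch cache, in isolation: over-allocate by eight bytes, hand out a pointer four bytes into the block, and verify both markers before freeing so an overrun of the row buffer is caught close to where it happened. A generic sketch with plain malloc/free and memcpy standing in for mem_alloc and mach_write_to_4 (the constant is reused from ROW_PREBUILT_FETCH_MAGIC_N; the function names are hypothetical):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define FETCH_MAGIC_N 465765687U	/* same value as ROW_PREBUILT_FETCH_MAGIC_N */

/* Allocate len usable bytes with a 4-byte guard word before and after. */
unsigned char *guarded_alloc(size_t len)
{
	uint32_t	magic = FETCH_MAGIC_N;
	unsigned char*	buf = malloc(len + 8);

	if (buf == NULL) {
		return NULL;
	}
	memcpy(buf, &magic, 4);
	memcpy(buf + 4 + len, &magic, 4);
	return buf + 4;			/* caller sees only the payload */
}

/* Verify both guard words; free on success, or keep the block for
post-mortem inspection and return -1 if either was overwritten. */
int guarded_free(unsigned char *ptr, size_t len)
{
	uint32_t	magic = FETCH_MAGIC_N;
	unsigned char*	buf = ptr - 4;

	if (memcmp(buf, &magic, 4) != 0
	    || memcmp(buf + 4 + len, &magic, 4) != 0) {
		fprintf(stderr, "guard word overwritten near %p\n", (void*) ptr);
		return -1;
	}
	free(buf);
	return 0;
}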
@ -71,6 +71,20 @@ the x-latch freed? The most efficient way for performing a
|
||||
searched delete is obviously to keep the x-latch for several
|
||||
steps of query graph execution. */
|
||||
|
||||
/***************************************************************
|
||||
Checks if an update vector changes some of the first fields of an index
|
||||
record. */
|
||||
static
|
||||
ibool
|
||||
row_upd_changes_first_fields(
|
||||
/*=========================*/
|
||||
/* out: TRUE if changes */
|
||||
dtuple_t* entry, /* in: old value of index entry */
|
||||
dict_index_t* index, /* in: index of entry */
|
||||
upd_t* update, /* in: update vector for the row */
|
||||
ulint n); /* in: how many first fields to check */
|
||||
|
||||
|
||||
/*************************************************************************
|
||||
Checks if index currently is mentioned as a referenced index in a foreign
|
||||
key constraint. */
|
||||
@ -132,6 +146,7 @@ ulint
|
||||
row_upd_check_references_constraints(
|
||||
/*=================================*/
|
||||
/* out: DB_SUCCESS or an error code */
|
||||
upd_node_t* node, /* in: row update node */
|
||||
btr_pcur_t* pcur, /* in: cursor positioned on a record; NOTE: the
|
||||
cursor position is lost in this function! */
|
||||
dict_table_t* table, /* in: table in question */
|
||||
@ -173,7 +188,16 @@ row_upd_check_references_constraints(
|
||||
foreign = UT_LIST_GET_FIRST(table->referenced_list);
|
||||
|
||||
while (foreign) {
|
||||
if (foreign->referenced_index == index) {
|
||||
/* Note that we may have an update which updates the index
|
||||
record, but does NOT update the first fields which are
|
||||
referenced in a foreign key constraint. Then the update does
|
||||
NOT break the constraint. */
|
||||
|
||||
if (foreign->referenced_index == index
|
||||
&& (node->is_delete
|
||||
|| row_upd_changes_first_fields(entry, index,
|
||||
node->update, foreign->n_fields))) {
|
||||
|
||||
if (foreign->foreign_table == NULL) {
|
||||
dict_table_get(foreign->foreign_table_name,
|
||||
trx);
|
||||
@ -189,10 +213,9 @@ row_upd_check_references_constraints(
|
||||
}
|
||||
|
||||
/* NOTE that if the thread ends up waiting for a lock
|
||||
we will release dict_operation_lock
|
||||
temporarily! But the counter on the table
|
||||
protects 'foreign' from being dropped while the check
|
||||
is running. */
|
||||
we will release dict_operation_lock temporarily!
|
||||
But the counter on the table protects 'foreign' from
|
||||
being dropped while the check is running. */
|
||||
|
||||
err = row_ins_check_foreign_constraint(FALSE, foreign,
|
||||
table, index, entry, thr);
|
||||
@ -255,6 +278,7 @@ upd_node_create(
|
||||
node->index = NULL;
|
||||
node->update = NULL;
|
||||
|
||||
node->foreign = NULL;
|
||||
node->cascade_heap = NULL;
|
||||
node->cascade_node = NULL;
|
||||
|
||||
@ -953,6 +977,53 @@ row_upd_changes_some_index_ord_field_binary(
|
||||
return(FALSE);
|
||||
}
|
||||
|
||||
/***************************************************************
|
||||
Checks if an update vector changes some of the first fields of an index
|
||||
record. */
|
||||
static
|
||||
ibool
|
||||
row_upd_changes_first_fields(
|
||||
/*=========================*/
|
||||
/* out: TRUE if changes */
|
||||
dtuple_t* entry, /* in: index entry */
|
||||
dict_index_t* index, /* in: index of entry */
|
||||
upd_t* update, /* in: update vector for the row */
|
||||
ulint n) /* in: how many first fields to check */
|
||||
{
|
||||
upd_field_t* upd_field;
|
||||
dict_field_t* ind_field;
|
||||
dict_col_t* col;
|
||||
ulint n_upd_fields;
|
||||
ulint col_pos;
|
||||
ulint i, j;
|
||||
|
||||
ut_a(update && index);
|
||||
ut_a(n <= dict_index_get_n_fields(index));
|
||||
|
||||
n_upd_fields = upd_get_n_fields(update);
|
||||
|
||||
for (i = 0; i < n; i++) {
|
||||
|
||||
ind_field = dict_index_get_nth_field(index, i);
|
||||
col = dict_field_get_col(ind_field);
|
||||
col_pos = dict_col_get_clust_pos(col);
|
||||
|
||||
for (j = 0; j < n_upd_fields; j++) {
|
||||
|
||||
upd_field = upd_get_nth_field(update, j);
|
||||
|
||||
if (col_pos == upd_field->field_no
|
||||
&& cmp_dfield_dfield(
|
||||
dtuple_get_nth_field(entry, i),
|
||||
&(upd_field->new_val))) {
|
||||
return(TRUE);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return(FALSE);
|
||||
}
|
||||
|
||||
/*************************************************************************
|
||||
Copies the column values from a record. */
|
||||
UNIV_INLINE
|
||||
@ -1106,9 +1177,11 @@ row_upd_sec_index_entry(
|
||||
err = btr_cur_del_mark_set_sec_rec(0, btr_cur, TRUE,
|
||||
thr, &mtr);
|
||||
if (err == DB_SUCCESS && check_ref) {
|
||||
|
||||
/* NOTE that the following call loses
|
||||
the position of pcur ! */
|
||||
err = row_upd_check_references_constraints(
|
||||
node,
|
||||
&pcur, index->table,
|
||||
index, thr, &mtr);
|
||||
if (err != DB_SUCCESS) {
|
||||
@ -1224,7 +1297,7 @@ row_upd_clust_rec_by_insert(
|
||||
if (check_ref) {
|
||||
/* NOTE that the following call loses
|
||||
the position of pcur ! */
|
||||
err = row_upd_check_references_constraints(
|
||||
err = row_upd_check_references_constraints(node,
|
||||
pcur, table,
|
||||
index, thr, mtr);
|
||||
if (err != DB_SUCCESS) {
|
||||
@ -1392,7 +1465,8 @@ row_upd_del_mark_clust_rec(
|
||||
if (err == DB_SUCCESS && check_ref) {
|
||||
/* NOTE that the following call loses the position of pcur ! */
|
||||
|
||||
err = row_upd_check_references_constraints(pcur, index->table,
|
||||
err = row_upd_check_references_constraints(node,
|
||||
pcur, index->table,
|
||||
index, thr, mtr);
|
||||
if (err != DB_SUCCESS) {
|
||||
mtr_commit(mtr);
|
||||
|
@ -529,6 +529,9 @@ open_or_create_log_file(
|
||||
new database */
|
||||
ibool* log_file_created, /* out: TRUE if new log file
|
||||
created */
|
||||
ibool log_file_has_been_opened,/* in: TRUE if a log file has been
|
||||
opened before: then it is an error
|
||||
to try to create another log file */
|
||||
ulint k, /* in: log group number */
|
||||
ulint i) /* in: log file number in group */
|
||||
{
|
||||
@ -587,6 +590,11 @@ open_or_create_log_file(
|
||||
fprintf(stderr,
|
||||
" InnoDB: Log file %s did not exist: new to be created\n",
|
||||
name);
|
||||
if (log_file_has_been_opened) {
|
||||
|
||||
return(DB_ERROR);
|
||||
}
|
||||
|
||||
fprintf(stderr, "InnoDB: Setting log file %s size to %lu MB\n",
|
||||
name, srv_log_file_size
|
||||
>> (20 - UNIV_PAGE_SIZE_SHIFT));
|
||||
@ -1160,7 +1168,8 @@ innobase_start_or_create_for_mysql(void)
|
||||
for (i = 0; i < srv_n_log_files; i++) {
|
||||
|
||||
err = open_or_create_log_file(create_new_db,
|
||||
&log_file_created, k, i);
|
||||
&log_file_created,
|
||||
log_opened, k, i);
|
||||
if (err != DB_SUCCESS) {
|
||||
|
||||
return((int) err);
|
||||
|
@@ -262,7 +262,7 @@ ut_print_buf(
	data = buf;

	for (i = 0; i < len; i++) {
		if (isprint((char)(*data))) {
		if (isprint((int)(*data))) {
			printf("%c", (char)*data);
		}
		data++;
@@ -302,7 +302,7 @@ ut_sprintf_buf(
	data = buf;

	for (i = 0; i < len; i++) {
		if (isprint((char)(*data))) {
		if (isprint((int)(*data))) {
			n += sprintf(str + n, "%c", (char)*data);
		} else {
			n += sprintf(str + n, ".");
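On the isprint() change: the ctype functions are only defined for values representable as unsigned char, plus EOF. Here *data is a byte, so widening it to int is safe; the pattern to avoid is passing a plain char that may be negative, which the usual idiom sidesteps by casting through unsigned char. A small hedged example of that idiom (not the server's code):

#include <ctype.h>
#include <stdio.h>

/* Print only the printable bytes of a buffer; the (unsigned char) cast
keeps the argument to isprint() inside the range the C standard defines
for the ctype functions. */
void print_printable(const char *buf, size_t len)
{
	size_t	i;

	for (i = 0; i < len; i++) {
		if (isprint((unsigned char) buf[i])) {
			putchar(buf[i]);
		}
	}
	putchar('\n');
}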
@ -4709,7 +4709,10 @@ join_read_const(JOIN_TAB *tab)
|
||||
empty_record(table);
|
||||
if (error != HA_ERR_KEY_NOT_FOUND)
|
||||
{
|
||||
sql_print_error("read_const: Got error %d when reading table %s",
|
||||
/* Locking reads can legally return also these errors, do not
|
||||
print them to the .err log */
|
||||
if (error != HA_ERR_LOCK_DEADLOCK && error != HA_ERR_LOCK_WAIT_TIMEOUT)
|
||||
sql_print_error("read_const: Got error %d when reading table %s",
|
||||
error, table->path);
|
||||
table->file->print_error(error,MYF(0));
|
||||
return 1;
|
||||
@ -4772,7 +4775,8 @@ join_read_always_key(JOIN_TAB *tab)
|
||||
{
|
||||
if (error != HA_ERR_KEY_NOT_FOUND)
|
||||
{
|
||||
sql_print_error("read_const: Got error %d when reading table %s",error,
|
||||
if (error != HA_ERR_LOCK_DEADLOCK && error != HA_ERR_LOCK_WAIT_TIMEOUT)
|
||||
sql_print_error("read_const: Got error %d when reading table %s",error,
|
||||
table->path);
|
||||
table->file->print_error(error,MYF(0));
|
||||
return 1;
|
||||
@ -4801,7 +4805,8 @@ join_read_last_key(JOIN_TAB *tab)
|
||||
{
|
||||
if (error != HA_ERR_KEY_NOT_FOUND)
|
||||
{
|
||||
sql_print_error("read_const: Got error %d when reading table %s",error,
|
||||
if (error != HA_ERR_LOCK_DEADLOCK && error != HA_ERR_LOCK_WAIT_TIMEOUT)
|
||||
sql_print_error("read_const: Got error %d when reading table %s",error,
|
||||
table->path);
|
||||
table->file->print_error(error,MYF(0));
|
||||
return 1;
|
||||
@ -4833,7 +4838,8 @@ join_read_next_same(READ_RECORD *info)
|
||||
{
|
||||
if (error != HA_ERR_END_OF_FILE)
|
||||
{
|
||||
sql_print_error("read_next: Got error %d when reading table %s",error,
|
||||
if (error != HA_ERR_LOCK_DEADLOCK && error != HA_ERR_LOCK_WAIT_TIMEOUT)
|
||||
sql_print_error("read_next: Got error %d when reading table %s",error,
|
||||
table->path);
|
||||
table->file->print_error(error,MYF(0));
|
||||
return 1;
|
||||
@ -4855,7 +4861,8 @@ join_read_prev_same(READ_RECORD *info)
|
||||
{
|
||||
if (error != HA_ERR_END_OF_FILE)
|
||||
{
|
||||
sql_print_error("read_next: Got error %d when reading table %s",error,
|
||||
if (error != HA_ERR_LOCK_DEADLOCK && error != HA_ERR_LOCK_WAIT_TIMEOUT)
|
||||
sql_print_error("read_next: Got error %d when reading table %s",error,
|
||||
table->path);
|
||||
table->file->print_error(error,MYF(0));
|
||||
error= 1;
|
||||
@ -4926,7 +4933,8 @@ join_read_first(JOIN_TAB *tab)
|
||||
{
|
||||
if (error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE)
|
||||
{
|
||||
sql_print_error("read_first_with_key: Got error %d when reading table",
|
||||
if (error != HA_ERR_LOCK_DEADLOCK && error != HA_ERR_LOCK_WAIT_TIMEOUT)
|
||||
sql_print_error("read_first_with_key: Got error %d when reading table",
|
||||
error);
|
||||
table->file->print_error(error,MYF(0));
|
||||
return 1;
|
||||
@ -4945,7 +4953,9 @@ join_read_next(READ_RECORD *info)
|
||||
{
|
||||
if (error != HA_ERR_END_OF_FILE)
|
||||
{
|
||||
sql_print_error("read_next_with_key: Got error %d when reading table %s",
|
||||
if (error != HA_ERR_LOCK_DEADLOCK && error != HA_ERR_LOCK_WAIT_TIMEOUT)
|
||||
sql_print_error(
|
||||
"read_next_with_key: Got error %d when reading table %s",
|
||||
error, info->table->path);
|
||||
info->file->print_error(error,MYF(0));
|
||||
return 1;
|
||||
@ -4977,7 +4987,8 @@ join_read_last(JOIN_TAB *tab)
|
||||
{
|
||||
if (error != HA_ERR_END_OF_FILE)
|
||||
{
|
||||
sql_print_error("read_last_with_key: Got error %d when reading table",
|
||||
if (error != HA_ERR_LOCK_DEADLOCK && error != HA_ERR_LOCK_WAIT_TIMEOUT)
|
||||
sql_print_error("read_last_with_key: Got error %d when reading table",
|
||||
error, table->path);
|
||||
table->file->print_error(error,MYF(0));
|
||||
return 1;
|
||||
@ -4996,7 +5007,9 @@ join_read_prev(READ_RECORD *info)
|
||||
{
|
||||
if (error != HA_ERR_END_OF_FILE)
|
||||
{
|
||||
sql_print_error("read_prev_with_key: Got error %d when reading table: %s",
|
||||
if (error != HA_ERR_LOCK_DEADLOCK && error != HA_ERR_LOCK_WAIT_TIMEOUT)
|
||||
sql_print_error(
|
||||
"read_prev_with_key: Got error %d when reading table: %s",
|
||||
error,info->table->path);
|
||||
info->file->print_error(error,MYF(0));
|
||||
return 1;
|
||||
@ -5024,7 +5037,8 @@ join_ft_read_first(JOIN_TAB *tab)
|
||||
{
|
||||
if (error != HA_ERR_END_OF_FILE)
|
||||
{
|
||||
sql_print_error("ft_read_first: Got error %d when reading table %s",
|
||||
if (error != HA_ERR_LOCK_DEADLOCK && error != HA_ERR_LOCK_WAIT_TIMEOUT)
|
||||
sql_print_error("ft_read_first: Got error %d when reading table %s",
|
||||
error, table->path);
|
||||
table->file->print_error(error,MYF(0));
|
||||
return 1;
|
||||
@ -5042,7 +5056,8 @@ join_ft_read_next(READ_RECORD *info)
|
||||
{
|
||||
if (error != HA_ERR_END_OF_FILE)
|
||||
{
|
||||
sql_print_error("ft_read_next: Got error %d when reading table %s",
|
||||
if (error != HA_ERR_LOCK_DEADLOCK && error != HA_ERR_LOCK_WAIT_TIMEOUT)
|
||||
sql_print_error("ft_read_next: Got error %d when reading table %s",
|
||||
error, info->table->path);
|
||||
info->file->print_error(error,MYF(0));
|
||||
return 1;
|
||||
|