
cleanup: prepare "update_handler" for WITHOUT OVERLAPS

* rename to a generic name
* move remaining initializations from query exec to prepare time
* simplify/unify key handling in open_table_from_share and delayed
* remove dead code
* move tests where they belong
Sergei Golubchik
2020-03-05 19:19:57 +01:00
parent 045510cb92
commit 0515577d12
17 changed files with 200 additions and 274 deletions

View File

@@ -1492,13 +1492,4 @@ DROP TABLE t1, t2;
 #
 CREATE TABLE t1 (pk INT, a TEXT NOT NULL DEFAULT '', PRIMARY KEY (pk), b INT AUTO_INCREMENT, UNIQUE(b), UNIQUE (a,b)) ENGINE=myisam;
 ERROR HY000: AUTO_INCREMENT column `b` cannot be used in the UNIQUE index `a`
-#
-# MDEV-21819 Assertion `inited == NONE || update_handler != this'
-# failed in handler::ha_write_row
-#
-CREATE OR REPLACE TABLE t1 (a INT, b BLOB, s DATE, e DATE, PERIOD FOR app(s,e), UNIQUE(b)) ENGINE=MyISAM PARTITION BY HASH(a) PARTITIONS 2;
-INSERT INTO t1 VALUES (1,'foo','2022-01-01', '2025-01-01');
-DELETE FROM t1 FOR PORTION OF app FROM '2023-01-01' TO '2024-01-01';
-ERROR 23000: Duplicate entry 'foo' for key 'b'
-DROP TABLE t1;
 set @@GLOBAL.max_allowed_packet= @allowed_packet;

View File

@@ -573,16 +573,5 @@ DROP TABLE t1, t2;
 --error ER_NO_AUTOINCREMENT_WITH_UNIQUE
 CREATE TABLE t1 (pk INT, a TEXT NOT NULL DEFAULT '', PRIMARY KEY (pk), b INT AUTO_INCREMENT, UNIQUE(b), UNIQUE (a,b)) ENGINE=myisam;
---echo #
---echo # MDEV-21819 Assertion `inited == NONE || update_handler != this'
---echo # failed in handler::ha_write_row
---echo #
-CREATE OR REPLACE TABLE t1 (a INT, b BLOB, s DATE, e DATE, PERIOD FOR app(s,e), UNIQUE(b)) ENGINE=MyISAM PARTITION BY HASH(a) PARTITIONS 2;
-INSERT INTO t1 VALUES (1,'foo','2022-01-01', '2025-01-01');
---error ER_DUP_ENTRY
-DELETE FROM t1 FOR PORTION OF app FROM '2023-01-01' TO '2024-01-01';
-DROP TABLE t1;
 # Cleanup
 set @@GLOBAL.max_allowed_packet= @allowed_packet;

View File

@@ -0,0 +1,17 @@
+#
+# Assertion `inited == NONE || update_handler != this' failed in
+# handler::ha_write_row
+#
+CREATE TABLE t1 (f VARCHAR(4096), s DATE, e DATE, PERIOD FOR app(s,e), UNIQUE(f)) ENGINE=MyISAM;
+INSERT INTO t1 VALUES ('foo', '2023-08-30', '2025-07-09'),('bar', '2021-01-01', '2021-12-31');
+DELETE FROM t1 FOR PORTION OF app FROM '2023-08-29' TO '2025-07-01';
+DROP TABLE t1;
+#
+# MDEV-21819 Assertion `inited == NONE || update_handler != this'
+# failed in handler::ha_write_row
+#
+CREATE OR REPLACE TABLE t1 (a INT, b BLOB, s DATE, e DATE, PERIOD FOR app(s,e), UNIQUE(b)) ENGINE=MyISAM PARTITION BY HASH(a) PARTITIONS 2;
+INSERT INTO t1 VALUES (1,'foo','2022-01-01', '2025-01-01');
+DELETE FROM t1 FOR PORTION OF app FROM '2023-01-01' TO '2024-01-01';
+ERROR 23000: Duplicate entry 'foo' for key 'b'
+DROP TABLE t1;

View File

@@ -0,0 +1,23 @@
+--source include/have_partition.inc
+--echo #
+--echo # Assertion `inited == NONE || update_handler != this' failed in
+--echo # handler::ha_write_row
+--echo #
+CREATE TABLE t1 (f VARCHAR(4096), s DATE, e DATE, PERIOD FOR app(s,e), UNIQUE(f)) ENGINE=MyISAM;
+INSERT INTO t1 VALUES ('foo', '2023-08-30', '2025-07-09'),('bar', '2021-01-01', '2021-12-31');
+DELETE FROM t1 FOR PORTION OF app FROM '2023-08-29' TO '2025-07-01';
+DROP TABLE t1;
+--echo #
+--echo # MDEV-21819 Assertion `inited == NONE || update_handler != this'
+--echo # failed in handler::ha_write_row
+--echo #
+CREATE OR REPLACE TABLE t1 (a INT, b BLOB, s DATE, e DATE, PERIOD FOR app(s,e), UNIQUE(b)) ENGINE=MyISAM PARTITION BY HASH(a) PARTITIONS 2;
+INSERT INTO t1 VALUES (1,'foo','2022-01-01', '2025-01-01');
+--error ER_DUP_ENTRY
+DELETE FROM t1 FOR PORTION OF app FROM '2023-01-01' TO '2024-01-01';
+DROP TABLE t1;

View File

@@ -1,8 +0,0 @@
-#
-# Assertion `inited == NONE || update_handler != this' failed in
-# handler::ha_write_row
-#
-CREATE TABLE t1 (f VARCHAR(4096), s DATE, e DATE, PERIOD FOR app(s,e), UNIQUE(f)) ENGINE=MyISAM;
-INSERT INTO t1 VALUES ('foo', '2023-08-30', '2025-07-09'),('bar', '2021-01-01', '2021-12-31');
-DELETE FROM t1 FOR PORTION OF app FROM '2023-08-29' TO '2025-07-01';
-DROP TABLE t1;

View File

@@ -1,9 +0,0 @@
---echo #
---echo # Assertion `inited == NONE || update_handler != this' failed in
---echo # handler::ha_write_row
---echo #
-CREATE TABLE t1 (f VARCHAR(4096), s DATE, e DATE, PERIOD FOR app(s,e), UNIQUE(f)) ENGINE=MyISAM;
-INSERT INTO t1 VALUES ('foo', '2023-08-30', '2025-07-09'),('bar', '2021-01-01', '2021-12-31');
-DELETE FROM t1 FOR PORTION OF app FROM '2023-08-29' TO '2025-07-01';
-DROP TABLE t1;

View File

@@ -4352,16 +4352,6 @@ int ha_partition::write_row(const uchar * buf)
   }
   m_last_part= part_id;
   DBUG_PRINT("info", ("Insert in partition %u", part_id));
-  /*
-    We have to call prepare_for_insert() if we have an update handler
-    in the underlying table (to clone the handler). This is because for
-    INSERT's prepare_for_insert() is only called for the main table,
-    not for all partitions. This is to reduce the huge overhead of cloning
-    a possibly not needed handler if there are many partitions.
-  */
-  if (table->s->long_unique_table &&
-      m_file[part_id]->update_handler == m_file[part_id] && inited == RND)
-    m_file[part_id]->prepare_for_insert(0);
   start_part_bulk_insert(thd, part_id);

@@ -9940,8 +9930,13 @@ void ha_partition::print_error(int error, myf errflag)
       /* fall through to generic error handling. */
     }

-  /* In case m_file has not been initialized, like in bug#42438 */
-  if (m_file)
+  /*
+    We choose the main handler's print_error if:
+    * m_file has not been initialized, like in bug#42438
+    * lookup_errkey is set, which means that an error has occurred in the
+      main handler, not in individual partitions
+  */
+  if (m_file && lookup_errkey == (uint)-1)
   {
     if (m_last_part >= m_tot_parts)
     {

View File

@@ -2753,7 +2753,6 @@ handler *handler::clone(const char *name, MEM_ROOT *mem_root)
                          HA_OPEN_IGNORE_IF_LOCKED, mem_root))
     goto err;
-  new_handler->update_handler= new_handler;
   return new_handler;

 err:

@@ -2763,36 +2762,20 @@ err:
 /**
-  Creates a clone of handler used in update for unique hash key.
+  clone of current handler.
+
+  Creates a clone of handler used for unique hash key and WITHOUT OVERLAPS.
+  @return error code
 */
-
-bool handler::clone_handler_for_update()
+int handler::create_lookup_handler()
 {
   handler *tmp;
-  DBUG_ASSERT(table->s->long_unique_table);
-
-  if (update_handler != this)
-    return 0;                                   // Already done
+  if (lookup_handler != this)
+    return 0;
   if (!(tmp= clone(table->s->normalized_path.str, table->in_use->mem_root)))
     return 1;
-  update_handler= tmp;
-  /* The update handler is only used to check if a row exists */
-  update_handler->ha_external_lock(table->in_use, F_RDLCK);
-  return 0;
-}
-
-/**
-  Delete update handler object if it exists
-*/
-
-void handler::delete_update_handler()
-{
-  if (update_handler != this)
-  {
-    update_handler->ha_external_lock(table->in_use, F_UNLCK);
-    update_handler->ha_close();
-    delete update_handler;
-  }
-  update_handler= this;
-}
+  lookup_handler= tmp;
+  return lookup_handler->ha_external_lock(table->in_use, F_RDLCK);
 }

 LEX_CSTRING *handler::engine_name()
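
Taken together with the ha_external_lock() hunk further down, this pins down the lookup handler's lifecycle: it is cloned lazily, at most once per lock cycle (lookup_handler == this is the "no clone" sentinel), read-locked at creation, and destroyed when the table lock is released. A toy model of that ownership invariant, with a hypothetical class rather than the real handler API:

// Toy model of the lookup_handler ownership invariant (hypothetical class;
// the real logic is in handler::create_lookup_handler() and
// handler::ha_external_lock() in this commit).
#include <cstdio>

struct Handler
{
  Handler *lookup_handler= this;        // == this means "no clone exists"

  int create_lookup_handler()           // idempotent, like the new function
  {
    if (lookup_handler != this)
      return 0;                         // already cloned: nothing to do
    lookup_handler= new Handler();      // stands in for clone() + F_RDLCK
    return 0;
  }

  void external_unlock()                // stands in for F_UNLCK handling
  {
    if (lookup_handler != this)
    {
      delete lookup_handler;            // the clone dies with the lock...
      lookup_handler= this;             // ...and the sentinel is restored
    }
  }
};

int main()
{
  Handler h;
  h.create_lookup_handler();
  h.create_lookup_handler();            // second call is a no-op
  std::printf("cloned: %s\n", h.lookup_handler != &h ? "yes" : "no");
  h.external_unlock();
  std::printf("cloned: %s\n", h.lookup_handler != &h ? "yes" : "no");
}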
@@ -2952,7 +2935,6 @@ int handler::ha_open(TABLE *table_arg, const char *name, int mode,
   }
   reset_statistics();
   internal_tmp_table= MY_TEST(test_if_locked & HA_OPEN_INTERNAL_TABLE);
-  update_handler= this;
   DBUG_RETURN(error);
 }

@@ -4349,15 +4331,17 @@ uint handler::get_dup_key(int error)
 {
   DBUG_ASSERT(table_share->tmp_table != NO_TMP_TABLE || m_lock_type != F_UNLCK);
   DBUG_ENTER("handler::get_dup_key");
-  if (table->s->long_unique_table && table->file->errkey < table->s->keys)
-    DBUG_RETURN(table->file->errkey);
-  table->file->errkey = (uint) -1;
+
+  if (lookup_errkey != (uint)-1)
+    DBUG_RETURN(errkey= lookup_errkey);
+  errkey= (uint)-1;
+
   if (error == HA_ERR_FOUND_DUPP_KEY ||
       error == HA_ERR_FOREIGN_DUPLICATE_KEY ||
       error == HA_ERR_FOUND_DUPP_UNIQUE || error == HA_ERR_NULL_IN_SPATIAL ||
       error == HA_ERR_DROP_INDEX_FK)
-    table->file->info(HA_STATUS_ERRKEY | HA_STATUS_NO_LOCK);
-  DBUG_RETURN(table->file->errkey);
+    info(HA_STATUS_ERRKEY | HA_STATUS_NO_LOCK);
+  DBUG_RETURN(errkey);
 }

@@ -6497,6 +6481,14 @@ int handler::ha_external_lock(THD *thd, int lock_type)
     mysql_audit_external_lock(thd, table_share, lock_type);
   }

+  if (lock_type == F_UNLCK && lookup_handler != this)
+  {
+    lookup_handler->ha_external_lock(table->in_use, F_UNLCK);
+    lookup_handler->close();
+    delete lookup_handler;
+    lookup_handler= this;
+  }
+
   if (MYSQL_HANDLER_RDLOCK_DONE_ENABLED() ||
       MYSQL_HANDLER_WRLOCK_DONE_ENABLED() ||
       MYSQL_HANDLER_UNLOCK_DONE_ENABLED())

@@ -6535,8 +6527,6 @@ int handler::ha_reset()
   DBUG_ASSERT(inited == NONE);
   /* reset the bitmaps to point to defaults */
   table->default_column_bitmaps();
-  if (update_handler != this)
-    delete_update_handler();
   pushed_cond= NULL;
   tracker= NULL;
   mark_trx_read_write_done= 0;

@@ -6580,15 +6570,13 @@ static int wsrep_after_row(THD *thd)
   Check if there is a conflicting unique hash key
 */
-static int check_duplicate_long_entry_key(TABLE *table, handler *handler,
-                                          const uchar *new_rec, uint key_no)
+int handler::check_duplicate_long_entry_key(const uchar *new_rec, uint key_no)
 {
-  Field *hash_field;
   int result, error= 0;
   KEY *key_info= table->key_info + key_no;
-  hash_field= key_info->key_part->field;
+  Field *hash_field= key_info->key_part->field;
   uchar ptr[HA_HASH_KEY_LENGTH_WITH_NULL];
-  DBUG_ENTER("check_duplicate_long_entry_key");
+  DBUG_ENTER("handler::check_duplicate_long_entry_key");

   DBUG_ASSERT((key_info->flags & HA_NULL_PART_KEY &&
                key_info->key_length == HA_HASH_KEY_LENGTH_WITH_NULL) ||

@@ -6599,15 +6587,11 @@ static int check_duplicate_long_entry_key(TABLE *table, handler *handler,
   key_copy(ptr, new_rec, key_info, key_info->key_length, false);

-  if (!table->check_unique_buf)
-    table->check_unique_buf= (uchar *)alloc_root(&table->mem_root,
-                                                 table->s->reclength);
-
-  result= handler->ha_index_init(key_no, 0);
+  result= lookup_handler->ha_index_init(key_no, 0);
   if (result)
     DBUG_RETURN(result);
-  store_record(table, check_unique_buf);
-  result= handler->ha_index_read_map(table->record[0],
-                                     ptr, HA_WHOLE_KEY, HA_READ_KEY_EXACT);
+  store_record(table, file->lookup_buffer);
+  result= lookup_handler->ha_index_read_map(table->record[0],
+                                            ptr, HA_WHOLE_KEY, HA_READ_KEY_EXACT);
   if (!result)
   {

@@ -6618,7 +6602,7 @@ static int check_duplicate_long_entry_key(TABLE *table, handler *handler,
     uint arg_count= temp->argument_count();
     do
     {
-      my_ptrdiff_t diff= table->check_unique_buf - new_rec;
+      my_ptrdiff_t diff= table->file->lookup_buffer - new_rec;
       is_same= true;
       for (uint j=0; is_same && j < arg_count; j++)
       {

@@ -6643,8 +6627,9 @@ static int check_duplicate_long_entry_key(TABLE *table, handler *handler,
         }
       }
     }
-    while (!is_same && !(result= handler->ha_index_next_same(table->record[0],
-                         ptr, key_info->key_length)));
+    while (!is_same &&
+           !(result= lookup_handler->ha_index_next_same(table->record[0],
+                                                 ptr, key_info->key_length)));
     if (is_same)
       error= HA_ERR_FOUND_DUPP_KEY;
     goto exit;

@@ -6654,33 +6639,40 @@ static int check_duplicate_long_entry_key(TABLE *table, handler *handler,
 exit:
   if (error == HA_ERR_FOUND_DUPP_KEY)
   {
-    table->file->errkey= key_no;
-    if (handler->ha_table_flags() & HA_DUPLICATE_POS)
+    table->file->lookup_errkey= key_no;
+    if (ha_table_flags() & HA_DUPLICATE_POS)
     {
-      handler->position(table->record[0]);
-      memcpy(table->file->dup_ref, handler->ref, handler->ref_length);
+      lookup_handler->position(table->record[0]);
+      memcpy(table->file->dup_ref, lookup_handler->ref, ref_length);
     }
   }
-  restore_record(table, check_unique_buf);
-  handler->ha_index_end();
+  restore_record(table, file->lookup_buffer);
+  lookup_handler->ha_index_end();
   DBUG_RETURN(error);
 }

+void handler::alloc_lookup_buffer()
+{
+  if (!lookup_buffer)
+    lookup_buffer= (uchar*)alloc_root(&table->mem_root,
+                                      table_share->max_unique_length
+                                      + table_share->null_fields
+                                      + table_share->reclength);
+}
+
 /** @brief
     check whether inserted records breaks the
     unique constraint on long columns.
     @returns 0 if no duplicate else returns error
   */
-static int check_duplicate_long_entries(TABLE *table, handler *handler,
-                                        const uchar *new_rec)
+int handler::check_duplicate_long_entries(const uchar *new_rec)
 {
-  table->file->errkey= -1;
+  lookup_errkey= (uint)-1;
   for (uint i= 0; i < table->s->keys; i++)
   {
     int result;
     if (table->key_info[i].algorithm == HA_KEY_ALG_LONG_HASH &&
-        (result= check_duplicate_long_entry_key(table, handler, new_rec, i)))
+        (result= check_duplicate_long_entry_key(new_rec, i)))
       return result;
   }
   return 0;

@@ -6701,8 +6693,7 @@ static int check_duplicate_long_entries(TABLE *table, handler *handler,
     key as a parameter in normal insert key should be -1
     @returns 0 if no duplicate else returns error
   */
-
-static int check_duplicate_long_entries_update(TABLE *table, uchar *new_rec)
+int handler::check_duplicate_long_entries_update(const uchar *new_rec)
 {
   Field *field;
   uint key_parts;

@@ -6713,7 +6704,7 @@ static int check_duplicate_long_entries_update(TABLE *table, uchar *new_rec)
     with respect to fields in hash_str
   */
   uint reclength= (uint) (table->record[1] - table->record[0]);
-  table->file->clone_handler_for_update();
+
   for (uint i= 0; i < table->s->keys; i++)
   {
     keyinfo= table->key_info + i;

@@ -6728,9 +6719,7 @@ static int check_duplicate_long_entries_update(TABLE *table, uchar *new_rec)
       /* Compare fields if they are different then check for duplicates */
       if (field->cmp_binary_offset(reclength))
       {
-        if ((error= (check_duplicate_long_entry_key(table,
-                                                    table->file->update_handler,
-                                                    new_rec, i))))
+        if((error= check_duplicate_long_entry_key(new_rec, i)))
           return error;
         /*
           break because check_duplicate_long_entries_key will

@@ -6815,24 +6804,20 @@ bool handler::prepare_for_row_logging()
 /*
   Do all initialization needed for insert
-
-  @param force_update_handler Set to TRUE if we should always create an
-                              update handler. Needed if we don't know if we
-                              are going to do inserts while a scan is in
-                              progress.
 */
-
-int handler::prepare_for_insert(bool force_update_handler)
+int handler::prepare_for_insert()
 {
   /* Preparation for unique of blob's */
-  if (table->s->long_unique_table && (inited == RND || force_update_handler))
+  if (table->s->long_unique_table)
   {
     /*
       When doing a scan we can't use the same handler to check
      duplicate rows. Create a new temporary one
    */
-    if (clone_handler_for_update())
+    if (inited != NONE && create_lookup_handler())
      return 1;
+    alloc_lookup_buffer();
   }
   return 0;
 }
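
Worth spelling out the ordering contract this creates: prepare_for_insert() now derives the need for a clone from `inited` instead of a caller-supplied flag, so callers must invoke it after any rnd_init()/index_init(), and the source comment gives the reason — a handler that is mid-scan cannot also probe the hash key for duplicates. A minimal stand-alone illustration of that constraint (hypothetical names, not the handler API):

// Why duplicate checks use a second cursor while a scan is in progress:
// probing on the scanning cursor would disturb its position. Hypothetical
// model, not MariaDB code.
#include <cassert>
#include <vector>

struct Cursor
{
  enum State { NONE, RND } inited= NONE;
  size_t pos= 0;
  const std::vector<int> *rows= nullptr;

  void rnd_init(const std::vector<int> &r) { rows= &r; inited= RND; pos= 0; }
  bool rnd_next(int &out)
  {
    if (pos >= rows->size())
      return false;
    out= (*rows)[pos++];
    return true;
  }
  // A lookup is only safe on an idle cursor; mid-scan it would clobber pos.
  bool probe(const std::vector<int> &r, int key)
  {
    assert(inited == NONE);
    for (int v : r)
      if (v == key)
        return true;
    return false;
  }
};

int main()
{
  std::vector<int> t= {1, 2, 3};
  Cursor scan, lookup;                  // lookup plays the lookup_handler role
  scan.rnd_init(t);
  for (int v; scan.rnd_next(v);)
    (void) lookup.probe(t, v);          // duplicate check; scan stays intact
}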
@@ -6852,8 +6837,8 @@ int handler::ha_write_row(const uchar *buf)
   if (table->s->long_unique_table && this == table->file)
   {
-    DBUG_ASSERT(inited == NONE || update_handler != this);
-    if ((error= check_duplicate_long_entries(table, update_handler, buf)))
+    DBUG_ASSERT(inited == NONE || lookup_handler != this);
+    if ((error= check_duplicate_long_entries(buf)))
       DBUG_RETURN(error);
   }
   TABLE_IO_WAIT(tracker, PSI_TABLE_WRITE_ROW, MAX_KEY, error,

@@ -6898,8 +6883,10 @@ int handler::ha_update_row(const uchar *old_data, const uchar *new_data)
   mark_trx_read_write();
   increment_statistics(&SSV::ha_update_count);
   if (table->s->long_unique_table &&
-      (error= check_duplicate_long_entries_update(table, (uchar*) new_data)))
+      (error= check_duplicate_long_entries_update(new_data)))
+  {
     return error;
+  }

   TABLE_IO_WAIT(tracker, PSI_TABLE_UPDATE_ROW, active_index, 0,
                 { error= update_row(old_data, new_data);})

View File

@@ -3025,10 +3025,12 @@ protected:
   Table_flags cached_table_flags;       /* Set on init() and open() */

   ha_rows estimation_rows_to_insert;
+  handler *lookup_handler;
 public:
   handlerton *ht;                 /* storage engine of this handler */
   uchar *ref;                     /* Pointer to current row */
   uchar *dup_ref;                 /* Pointer to duplicate row */
+  uchar *lookup_buffer;

   ha_statistics stats;

@@ -3061,6 +3063,7 @@ public:
   */
   bool in_range_check_pushed_down;

+  uint lookup_errkey;
   uint errkey;                    /* Last dup key */
   uint key_used_on_scan;
   uint active_index, keyread;

@@ -3068,7 +3071,6 @@ public:
   /** Length of ref (1-8 or the clustered key length) */
   uint ref_length;
   FT_INFO *ft_handler;
-  handler *update_handler;        /* Handler used in case of update */
   enum init_stat { NONE=0, INDEX, RND };
   init_stat inited, pre_inited;

@@ -3225,13 +3227,14 @@ private:
 public:
   handler(handlerton *ht_arg, TABLE_SHARE *share_arg)
     :table_share(share_arg), table(0),
-    estimation_rows_to_insert(0), ht(ht_arg),
-    ref(0), end_range(NULL),
+    estimation_rows_to_insert(0),
+    lookup_handler(this),
+    ht(ht_arg), ref(0), lookup_buffer(NULL), end_range(NULL),
     implicit_emptied(0),
     mark_trx_read_write_done(0),
     check_table_binlog_row_based_done(0),
     check_table_binlog_row_based_result(0),
-    in_range_check_pushed_down(FALSE), errkey(-1),
+    in_range_check_pushed_down(FALSE), lookup_errkey(-1), errkey(-1),
     key_used_on_scan(MAX_KEY),
     active_index(MAX_KEY), keyread(MAX_KEY),
     ref_length(sizeof(my_off_t)),

@@ -3268,8 +3271,6 @@ public:
     return ref != 0;
   }
   virtual handler *clone(const char *name, MEM_ROOT *mem_root);
-  bool clone_handler_for_update();
-  void delete_update_handler();
   /** This is called after create to allow us to set up cached variables */
   void init()
   {

@@ -4646,7 +4647,7 @@ protected:
 public:
   bool check_table_binlog_row_based();
   bool prepare_for_row_logging();
-  int prepare_for_insert(bool force_update_handler= 0);
+  int prepare_for_insert();
   int binlog_log_row(TABLE *table,
                      const uchar *before_record,
                      const uchar *after_record,

@@ -4671,6 +4672,12 @@ private:
   void mark_trx_read_write_internal();
   bool check_table_binlog_row_based_internal();

+  int create_lookup_handler();
+  void alloc_lookup_buffer();
+  int check_duplicate_long_entries(const uchar *new_rec);
+  int check_duplicate_long_entries_update(const uchar *new_rec);
+  int check_duplicate_long_entry_key(const uchar *new_rec, uint key_no);
+
 protected:
   /*
     These are intended to be used only by handler::ha_xxxx() functions

View File

@@ -753,7 +753,7 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
   if (table->versioned(VERS_TIMESTAMP) ||
       (table_list->has_period() && !portion_of_time_through_update))
-    table->file->prepare_for_insert(1);
+    table->file->prepare_for_insert();

   THD_STAGE_INFO(thd, stage_updating);
   while (likely(!(error=info.read_record())) && likely(!thd->killed) &&

@@ -1243,7 +1243,7 @@ multi_delete::initialize_tables(JOIN *join)
       tbl->prepare_for_position();

       if (tbl->versioned(VERS_TIMESTAMP))
-        tbl->file->prepare_for_insert(1);
+        tbl->file->prepare_for_insert();
     }
     else if ((tab->type != JT_SYSTEM && tab->type != JT_CONST) &&
              walk == delete_tables)

View File

@@ -886,9 +886,9 @@ bool mysql_insert(THD *thd, TABLE_LIST *table_list,
     {
       if (table->file->ha_rnd_init_with_error(0))
         goto abort;
-      table->file->prepare_for_insert();
     }
   }
+  table->file->prepare_for_insert();
   /**
     This is a simple check for the case when the table has a trigger
     that reads from it, or when the statement invokes a stored function

@@ -2544,10 +2544,7 @@ TABLE *Delayed_insert::get_local_table(THD* client_thd)
   uchar *bitmap;
   char *copy_tmp;
   uint bitmaps_used;
-  KEY_PART_INFO *key_part, *end_part;
   Field **default_fields, **virtual_fields;
-  KEY *keys;
-  KEY_PART_INFO *key_parts;
   uchar *record;
   DBUG_ENTER("Delayed_insert::get_local_table");

@@ -2615,9 +2612,6 @@ TABLE *Delayed_insert::get_local_table(THD* client_thd)
                         share->default_expressions + 1) * sizeof(Field*),
                         &virtual_fields,
                         (share->virtual_fields + 1) * sizeof(Field*),
-                        &keys, share->keys * sizeof(KEY),
-                        &key_parts,
-                        share->ext_key_parts * sizeof(KEY_PART_INFO),
                         &record, (uint) share->reclength,
                         &bitmap, (uint) share->column_bitmap_size*4,
                         NullS))

@@ -2636,13 +2630,6 @@ TABLE *Delayed_insert::get_local_table(THD* client_thd)
     copy->default_field= default_fields;
   if (share->virtual_fields)
     copy->vfield= virtual_fields;
-  copy->key_info= keys;
-  copy->base_key_part= key_parts;
-  /* Copy key and key parts from original table */
-  memcpy(keys, table->key_info, sizeof(KEY) * share->keys);
-  memcpy(key_parts, table->base_key_part,
-         sizeof(KEY_PART_INFO) * share->ext_key_parts);

   copy->expr_arena= NULL;

@@ -2675,34 +2662,8 @@ TABLE *Delayed_insert::get_local_table(THD* client_thd)
   }
   *field=0;

-  /* The following is needed for long hash key */
-  key_part= copy->base_key_part;
-  for (KEY *key= copy->key_info, *end_key= key + share->keys ;
-       key < end_key;
-       key++)
-  {
-    key->key_part= key_part;
-    key_part+= key->ext_key_parts;
-    if (key->algorithm == HA_KEY_ALG_LONG_HASH)
-      key_part++;
-  }
-  for (key_part= copy->base_key_part,
-       end_part= key_part + share->ext_key_parts ;
-       key_part < end_part ;
-       key_part++)
-  {
-    Field *field= key_part->field= copy->field[key_part->fieldnr - 1];
-    /* Fix partial fields, like in open_table_from_share() */
-    if (field->key_length() != key_part->length &&
-        !(field->flags & BLOB_FLAG))
-    {
-      field= key_part->field= field->make_new_field(client_thd->mem_root,
-                                                    copy, 0);
-      field->field_length= key_part->length;
-    }
-  }
+  if (copy_keys_from_share(copy, client_thd->mem_root))
+    goto error;

   if (share->virtual_fields || share->default_expressions ||
       share->default_fields)

@@ -3310,12 +3271,6 @@ pthread_handler_t handle_delayed_insert(void *arg)
       di->table->file->ha_release_auto_increment();
       mysql_unlock_tables(thd, lock);
       trans_commit_stmt(thd);
-      /*
-        We have to delete the update handler as we need to create a new one
-        for the next lock table to ensure they both have the same read
-        view.
-      */
-      di->table->file->delete_update_handler();
       di->group_count=0;
       mysql_audit_release(thd);

@@ -3953,9 +3908,9 @@ select_insert::prepare(List<Item> &values, SELECT_LEX_UNIT *u)
     {
       if (table->file->ha_rnd_init_with_error(0))
         DBUG_RETURN(1);
-      table->file->prepare_for_insert();
     }
   }
+  table->file->prepare_for_insert();
   if (info.handle_duplicates == DUP_REPLACE &&
       (!table->triggers || !table->triggers->has_delete_triggers()))
     table->file->extra(HA_EXTRA_WRITE_CAN_REPLACE);

@@ -4720,9 +4675,9 @@ select_create::prepare(List<Item> &_values, SELECT_LEX_UNIT *u)
     {
       if (table->file->ha_rnd_init_with_error(0))
         DBUG_RETURN(1);
-      table->file->prepare_for_insert();
     }
   }
+  table->file->prepare_for_insert();
   if (info.handle_duplicates == DUP_REPLACE &&
       (!table->triggers || !table->triggers->has_delete_triggers()))
     table->file->extra(HA_EXTRA_WRITE_CAN_REPLACE);

View File

@@ -652,8 +652,8 @@ int mysql_load(THD *thd, const sql_exchange *ex, TABLE_LIST *table_list,
   {
     if ((error= table_list->table->file->ha_rnd_init_with_error(0)))
       goto err;
-    table->file->prepare_for_insert();
   }
+  table->file->prepare_for_insert();
   thd_progress_init(thd, 2);
   if (table_list->table->validate_default_values_of_unset_fields(thd))
   {

View File

@@ -18377,7 +18377,6 @@ TABLE *Create_tmp_table::start(THD *thd,
   table->copy_blobs= 1;
   table->in_use= thd;
   table->no_rows_with_nulls= param->force_not_null_cols;
-  table->check_unique_buf= NULL;

   table->s= share;
   init_tmp_table_share(thd, share, "", 0, "(temporary)", tmpname);

View File

@@ -11013,6 +11013,8 @@ copy_data_between_tables(THD *thd, TABLE *from, TABLE *to,
   from->file->column_bitmaps_signal();

+  to->file->prepare_for_insert();
+
   /* Tell handler that we have values for all columns in the to table */
   to->use_all_columns();
   /* Add virtual columns to vcol_set to ensure they are updated */

View File

@@ -968,8 +968,7 @@ update_begin:
   can_compare_record= records_are_comparable(table);
   explain->tracker.on_scan_init();

-  if (table->versioned(VERS_TIMESTAMP) || table_list->has_period())
-    table->file->prepare_for_insert(1);
+  table->file->prepare_for_insert();

   THD_STAGE_INFO(thd, stage_updating);
   while (!(error=info.read_record()) && !thd->killed)

@@ -2028,8 +2027,7 @@ int multi_update::prepare(List<Item> &not_used_values,
     {
       table->read_set= &table->def_read_set;
       bitmap_union(table->read_set, &table->tmp_set);
-      if (table->versioned(VERS_TIMESTAMP))
-        table->file->prepare_for_insert(1);
+      table->file->prepare_for_insert();
     }
   }
   if (unlikely(error))

View File

@@ -1243,46 +1243,31 @@ bool parse_vcol_defs(THD *thd, MEM_ROOT *mem_root, TABLE *table,
         Item *list_item;
         KEY *key= 0;
         uint key_index, parts= 0;
-        KEY_PART_INFO *key_part= table->base_key_part;
         for (key_index= 0; key_index < table->s->keys; key_index++)
         {
-          /*
-            We have to use key from share as this function may have changed
-            table->key_info if it was ever invoked before. This could happen
-            in case of INSERT DELAYED.
-          */
-          key= table->s->key_info + key_index;
-          if (key->algorithm == HA_KEY_ALG_LONG_HASH)
-          {
-            parts= key->user_defined_key_parts;
-            if (key_part[parts].fieldnr == field->field_index + 1)
-              break;
-            key_part++;
-          }
-          key_part+= key->ext_key_parts;
+          key= table->key_info + key_index;
+          parts= key->user_defined_key_parts;
+          if (key->key_part[parts].fieldnr == field->field_index + 1)
+            break;
         }
-        if (key_index == table->s->keys)
+        if (!key || key->algorithm != HA_KEY_ALG_LONG_HASH)
           goto end;
-
-        /* Correct the key & key_parts if this function has been called before */
-        key= table->key_info + key_index;
-        key->key_part= key_part;
-
-        for (uint i=0; i < parts; i++, key_part++)
+        KEY_PART_INFO *keypart;
+        for (uint i=0; i < parts; i++)
         {
-          if (key_part->key_part_flag & HA_PART_KEY_SEG)
+          keypart= key->key_part + i;
+          if (keypart->key_part_flag & HA_PART_KEY_SEG)
           {
-            int length= key_part->length/key_part->field->charset()->mbmaxlen;
+            int length= keypart->length/keypart->field->charset()->mbmaxlen;
             list_item= new (mem_root) Item_func_left(thd,
-                         new (mem_root) Item_field(thd, key_part->field),
+                         new (mem_root) Item_field(thd, keypart->field),
                          new (mem_root) Item_int(thd, length));
             list_item->fix_fields(thd, NULL);
-            key_part->field->vcol_info=
-              table->field[key_part->field->field_index]->vcol_info;
+            keypart->field->vcol_info=
+              table->field[keypart->field->field_index]->vcol_info;
           }
           else
-            list_item= new (mem_root) Item_field(thd, key_part->field);
+            list_item= new (mem_root) Item_field(thd, keypart->field);
           field_list->push_back(list_item, mem_root);
         }
         Item_func_hash *hash_item= new(mem_root)Item_func_hash(thd, *field_list);

@@ -3709,6 +3694,54 @@ static void print_long_unique_table(TABLE *table)
 }
 #endif

+bool copy_keys_from_share(TABLE *outparam, MEM_ROOT *root)
+{
+  TABLE_SHARE *share= outparam->s;
+  if (share->key_parts)
+  {
+    KEY *key_info, *key_info_end;
+    KEY_PART_INFO *key_part;
+
+    if (!multi_alloc_root(root, &key_info, share->keys*sizeof(KEY),
+                          &key_part, share->ext_key_parts*sizeof(KEY_PART_INFO),
+                          NullS))
+      return 1;
+
+    outparam->key_info= key_info;
+
+    memcpy(key_info, share->key_info, sizeof(*key_info)*share->keys);
+    memcpy(key_part, key_info->key_part, sizeof(*key_part)*share->ext_key_parts);
+
+    my_ptrdiff_t adjust_ptrs= PTR_BYTE_DIFF(key_part, key_info->key_part);
+    for (key_info_end= key_info + share->keys ;
+         key_info < key_info_end ;
+         key_info++)
+    {
+      key_info->table= outparam;
+      (uchar*&)(key_info->key_part)+= adjust_ptrs;
+      if (key_info->algorithm == HA_KEY_ALG_LONG_HASH)
+        key_info->flags&= ~HA_NOSAME;
+    }
+    for (KEY_PART_INFO *key_part_end= key_part+share->ext_key_parts;
+         key_part < key_part_end;
+         key_part++)
+    {
+      Field *field= key_part->field= outparam->field[key_part->fieldnr - 1];
+      if (field->key_length() != key_part->length &&
+          !(field->flags & BLOB_FLAG))
+      {
+        /*
+          We are using only a prefix of the column as a key:
+          Create a new field for the key part that matches the index
+        */
+        field= key_part->field=field->make_new_field(root, outparam, 0);
+        field->field_length= key_part->length;
+      }
+    }
+  }
+  return 0;
+}
+
 /*
   Open a table based on a TABLE_SHARE

@@ -3871,58 +3904,8 @@ enum open_frm_error open_table_from_share(THD *thd, TABLE_SHARE *share,
     outparam->found_next_number_field=
       outparam->field[(uint) (share->found_next_number_field - share->field)];

-  /* Fix key->name and key_part->field */
-  if (share->key_parts)
-  {
-    KEY *key_info, *key_info_end;
-    KEY_PART_INFO *key_part;
-    uint n_length;
-    n_length= share->keys*sizeof(KEY) + share->ext_key_parts*sizeof(KEY_PART_INFO);
-    if (!(key_info= (KEY*) alloc_root(&outparam->mem_root, n_length)))
-      goto err;
-    outparam->key_info= key_info;
-    key_part= (reinterpret_cast<KEY_PART_INFO*>(key_info+share->keys));
-    outparam->base_key_part= key_part;
-
-    memcpy(key_info, share->key_info, sizeof(*key_info)*share->keys);
-    memcpy(key_part, share->key_info[0].key_part, (sizeof(*key_part) *
-                                                   share->ext_key_parts));
-
-    for (key_info_end= key_info + share->keys ;
-         key_info < key_info_end ;
-         key_info++)
-    {
-      KEY_PART_INFO *key_part_end;
-
-      key_info->table= outparam;
-      key_info->key_part= key_part;
-
-      key_part_end= key_part + (share->use_ext_keys ? key_info->ext_key_parts :
-                                key_info->user_defined_key_parts) ;
-      if (key_info->algorithm == HA_KEY_ALG_LONG_HASH)
-      {
-        key_part_end++;
-        key_info->flags&= ~HA_NOSAME;
-      }
-      for ( ; key_part < key_part_end; key_part++)
-      {
-        Field *field= key_part->field= outparam->field[key_part->fieldnr - 1];
-        if (field->key_length() != key_part->length &&
-            !(field->flags & BLOB_FLAG))
-        {
-          /*
-            We are using only a prefix of the column as a key:
-            Create a new field for the key part that matches the index
-          */
-          field= key_part->field=field->make_new_field(&outparam->mem_root,
-                                                       outparam, 0);
-          const_cast<uint32_t&>(field->field_length)= key_part->length;
-        }
-      }
-      if (!share->use_ext_keys)
-        key_part+= key_info->ext_key_parts - key_info->user_defined_key_parts;
-    }
-  }
+  if (copy_keys_from_share(outparam, &outparam->mem_root))
+    goto err;

   /*
     Process virtual and default columns, if any.

@@ -5256,7 +5239,6 @@ void TABLE::init(THD *thd, TABLE_LIST *tl)
   range_rowid_filter_cost_info_elems= 0;
   range_rowid_filter_cost_info_ptr= NULL;
   range_rowid_filter_cost_info= NULL;
-  check_unique_buf= NULL;
   vers_write= s->versioned;
   quick_condition_rows=0;
   no_cache= false;
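
A side note on copy_keys_from_share() above: it memcpy()s the share's KEY and KEY_PART_INFO arrays and then rebases every KEY::key_part pointer by the byte distance between the new and old part arrays (PTR_BYTE_DIFF). A self-contained sketch of that rebasing trick, with made-up structs standing in for the real types:

// The pointer-rebasing trick from copy_keys_from_share(), reduced to
// made-up structs (Part/Key stand in for KEY_PART_INFO/KEY).
#include <cstddef>
#include <cstdio>
#include <cstring>

struct Part { int fieldnr; };
struct Key  { Part *part; int parts; };

int main()
{
  // "Share" copy: keys point into shared_parts.
  Part shared_parts[3]= {{1}, {2}, {3}};
  Key  shared_keys[2] = {{shared_parts, 2}, {shared_parts + 2, 1}};

  // Per-TABLE copy: memcpy both arrays, then shift each Key::part by the
  // byte difference between the two part arrays (cf. PTR_BYTE_DIFF).
  Part parts[3];
  Key  keys[2];
  std::memcpy(parts, shared_parts, sizeof parts);
  std::memcpy(keys,  shared_keys,  sizeof keys);

  std::ptrdiff_t adjust= reinterpret_cast<char*>(parts) -
                         reinterpret_cast<char*>(shared_parts);
  for (Key &k : keys)
    k.part= reinterpret_cast<Part*>(reinterpret_cast<char*>(k.part) + adjust);

  // Both keys now point into the private copy: prints "1 3".
  std::printf("%d %d\n", keys[0].part[0].fieldnr, keys[1].part[0].fieldnr);
}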

View File

@@ -1151,8 +1151,6 @@ public:
   THD *in_use;                    /* Which thread uses this */

   uchar *record[3];               /* Pointer to records */
-  /* record buf to resolve hash collisions for long UNIQUE constraints */
-  uchar *check_unique_buf;
   uchar *write_row_record;        /* Used as optimisation in
                                      THD::write_row */
   uchar *insert_values;           /* used by INSERT ... UPDATE */

@@ -1181,7 +1179,6 @@ public:
   /* Map of keys dependent on some constraint */
   key_map constraint_dependent_keys;
   KEY  *key_info;                 /* data of keys in database */
-  KEY_PART_INFO *base_key_part;   /* Where key parts are stored */

   Field **field;                  /* Pointer to fields */
   Field **vfield;                 /* Pointer to virtual fields*/

@@ -2979,6 +2976,7 @@ enum open_frm_error open_table_from_share(THD *thd, TABLE_SHARE *share,
                                        uint ha_open_flags, TABLE *outparam,
                                        bool is_create_table,
                                        List<String> *partitions_to_open= NULL);
+bool copy_keys_from_share(TABLE *outparam, MEM_ROOT *root);
 bool fix_session_vcol_expr(THD *thd, Virtual_column_info *vcol);
 bool fix_session_vcol_expr_for_read(THD *thd, Field *field,
                                     Virtual_column_info *vcol);