1
0
mirror of https://github.com/MariaDB/server.git synced 2025-08-08 11:22:35 +03:00

MDEV-22456 Dropping the adaptive hash index may cause DDL to lock up InnoDB

If the InnoDB buffer pool contains many pages for a table or index
that is being dropped or rebuilt, and if many of such pages are
pointed to by the adaptive hash index, dropping the adaptive hash index
may consume a lot of time.

The time-consuming operation of dropping the adaptive hash index entries
is being executed while the InnoDB data dictionary cache dict_sys is
exclusively locked.

It is not actually necessary to drop all adaptive hash index entries
at the time a table or index is being dropped or rebuilt. We can let
the LRU replacement policy of the buffer pool take care of this gradually.
For this to work, we must detach the dict_table_t and dict_index_t
objects from the main dict_sys cache, and once the last
adaptive hash index entry for the detached table is removed
(when the garbage page is evicted from the buffer pool) we can free
the dict_table_t and dict_index_t object.

Related to this, in MDEV-16283, we made ALTER TABLE...DISCARD TABLESPACE
skip both the buffer pool eviction and the drop of the adaptive hash index.
We shifted the burden to ALTER TABLE...IMPORT TABLESPACE or DROP TABLE.
We can remove the eviction from DROP TABLE. We must retain the eviction
in the ALTER TABLE...IMPORT TABLESPACE code path, so that in case the
discarded table is being re-imported with the same tablespace identifier,
the fresh data from the imported tablespace will replace any stale pages
in the buffer pool.

rpl.rpl_failed_drop_tbl_binlog: Remove the test. DROP TABLE can
no longer be interrupted inside InnoDB.

fseg_free_page(), fseg_free_step(), fseg_free_step_not_header(),
fseg_free_page_low(), fseg_free_extent(): Remove the parameter
that specifies whether the adaptive hash index should be dropped.

btr_search_lazy_free(): Lazily free an index when the last
reference to it is dropped from the adaptive hash index.

buf_pool_clear_hash_index(): Declare static, and move to the
same compilation unit with the bulk of the adaptive hash index
code.

dict_index_t::clone(), dict_index_t::clone_if_needed():
Clone an index that is being rebuilt while adaptive hash index
entries exist. The original index will be inserted into
dict_table_t::freed_indexes and dict_index_t::set_freed()
will be called.

dict_index_t::set_freed(), dict_index_t::freed(): Record (set_freed)
or check (freed) that the index has been freed. We will use the
impossible page number 1 to denote this condition.

dict_index_t::n_ahi_pages(): Replaces btr_search_info_get_ref_count().

dict_index_t::detach_columns(): Move the assignment n_fields=0
to ha_innobase_inplace_ctx::clear_added_indexes().
We must have access to the columns when freeing the
adaptive hash index. Note: dict_table_t::v_cols[] will remain
valid. If virtual columns are dropped or added, the table
definition will be reloaded in ha_innobase::commit_inplace_alter_table().

buf_page_mtr_lock(): Drop a stale adaptive hash index if needed.

We will also reduce the number of btr_get_search_latch() calls
and enclose some more code inside #ifdef BTR_CUR_HASH_ADAPT
in order to benefit cmake -DWITH_INNODB_AHI=OFF.
This commit is contained in:
Marko Mäkelä
2020-05-15 17:10:59 +03:00
parent ff66d65a09
commit ad6171b91c
34 changed files with 552 additions and 903 deletions

View File

@@ -1,32 +0,0 @@
include/master-slave.inc
[connection master]
create table t1 (a int) engine=innodb;
create table t2 (b longblob) engine=innodb;
create table t3 (c int) engine=innodb;
insert into t2 values (repeat('b',1024*1024));
insert into t2 select * from t2;
insert into t2 select * from t2;
insert into t2 select * from t2;
insert into t2 select * from t2;
set debug_sync='rm_table_no_locks_before_delete_table SIGNAL nogo WAIT_FOR go EXECUTE 2';
drop table t1, t2, t3;
connect foo,localhost,root;
set debug_sync='now SIGNAL go';
kill query CONNECTION_ID;
connection master;
ERROR 70100: Query execution was interrupted
"Tables t2 and t3 should be listed"
SHOW TABLES;
Tables_in_test
t2
t3
include/show_binlog_events.inc
Log_name Pos Event_type Server_id End_log_pos Info
master-bin.000001 # Gtid # # GTID #-#-#
master-bin.000001 # Query # # use `test`; DROP TABLE `t1` /* generated by server */
connection slave;
drop table t2, t3;
connection master;
set debug_sync='RESET';
drop table t2, t3;
include/rpl_end.inc

View File

@@ -1,64 +0,0 @@
# ==== Purpose ====
#
# Check that when the execution of a DROP TABLE command with single table
# fails it should not be written to the binary log. Also test that when the
# execution of DROP TABLE command with multiple tables fails the command
# should be written into the binary log.
#
# ==== Implementation ====
#
# Steps:
# 0 - Create tables named t1, t2, t3
# 1 - Execute DROP TABLE t1,t2,t3 command.
# 2 - Kill the DROP TABLE command while it is trying to drop table 't2'.
# 3 - Verify that tables t2,t3 are present after the DROP command execution
# was interrupted.
# 4 - Check that table 't1' is present in binary log as part of DROP
# command.
#
# ==== References ====
#
# MDEV-20348: DROP TABLE IF EXISTS killed on master but was replicated.
#
--source include/have_innodb.inc
--source include/have_debug_sync.inc
--source include/have_binlog_format_statement.inc
--source include/master-slave.inc
create table t1 (a int) engine=innodb;
create table t2 (b longblob) engine=innodb;
create table t3 (c int) engine=innodb;
# Grow t2 to about 16 MiB so that dropping it takes long enough for the
# KILL below to land while the drop is still in progress.
insert into t2 values (repeat('b',1024*1024));
insert into t2 select * from t2;
insert into t2 select * from t2;
insert into t2 select * from t2;
insert into t2 select * from t2;
# Remember the binlog position and this connection's id so the killer
# session can target the right query.
let $binlog_start= query_get_value(SHOW MASTER STATUS, Position, 1);
let $id=`select connection_id()`;
# The sync point fires once per table being dropped; EXECUTE 2 makes it
# act on the first two hits (t1 and t2), so the drop pauses before
# deleting t2 -- presumably after t1 is already gone; confirm against the
# rm_table_no_locks implementation.
set debug_sync='rm_table_no_locks_before_delete_table SIGNAL nogo WAIT_FOR go EXECUTE 2';
# send: issue the DROP asynchronously; the result is collected by "reap".
send drop table t1, t2, t3;
# Second session: release the sync point, wait until the drop is parked
# on it again, then kill the DROP TABLE query.
connect foo,localhost,root;
set debug_sync='now SIGNAL go';
let $wait_condition=select 1 from information_schema.processlist where state like 'debug sync point:%';
source include/wait_condition.inc;
--replace_result $id CONNECTION_ID
eval kill query $id;
connection master;
# The interrupted DROP must report ER_QUERY_INTERRUPTED to the client.
error ER_QUERY_INTERRUPTED;
reap;
--echo "Tables t2 and t3 should be listed"
SHOW TABLES;
--source include/show_binlog_events.inc
--sync_slave_with_master
drop table t2, t3;
connection master;
# Clear all DEBUG_SYNC actions before the cleanup drop.
set debug_sync='RESET';
drop table t2, t3;
source include/rpl_end.inc;

View File

@@ -2,7 +2,7 @@
Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2012, Facebook Inc. Copyright (c) 2012, Facebook Inc.
Copyright (c) 2014, 2019, MariaDB Corporation. Copyright (c) 2014, 2020, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software the terms of the GNU General Public License as published by the Free Software
@@ -723,8 +723,10 @@ void btr_page_free(dict_index_t* index, buf_block_t* block, mtr_t* mtr,
{ {
ut_ad(mtr_is_block_fix(mtr, block, MTR_MEMO_PAGE_X_FIX, index->table)); ut_ad(mtr_is_block_fix(mtr, block, MTR_MEMO_PAGE_X_FIX, index->table));
#ifdef BTR_CUR_HASH_ADAPT #ifdef BTR_CUR_HASH_ADAPT
ut_ad(!block->index || !blob); if (block->index && !block->index->freed()) {
ut_ad(!block->index || page_is_leaf(block->frame)); ut_ad(!blob);
ut_ad(page_is_leaf(block->frame));
}
#endif #endif
ut_ad(index->space == block->page.id.space()); ut_ad(index->space == block->page.id.space());
/* The root page is freed by btr_free_root(). */ /* The root page is freed by btr_free_root(). */
@@ -751,7 +753,7 @@ void btr_page_free(dict_index_t* index, buf_block_t* block, mtr_t* mtr,
fseg_free_page(seg_header, fseg_free_page(seg_header,
block->page.id.space(), block->page.id.space(),
block->page.id.page_no(), block->page.id.page_no(),
block->index != NULL, mtr); mtr);
/* The page was marked free in the allocation bitmap, but it /* The page was marked free in the allocation bitmap, but it
should remain exclusively latched until mtr_t::commit() or until it should remain exclusively latched until mtr_t::commit() or until it
@@ -876,7 +878,7 @@ btr_page_get_father_node_ptr_func(
err = btr_cur_search_to_nth_level( err = btr_cur_search_to_nth_level(
index, level + 1, tuple, index, level + 1, tuple,
PAGE_CUR_LE, latch_mode, cursor, 0, PAGE_CUR_LE, latch_mode, cursor, 0,
file, line, mtr); file, line, mtr, 0);
if (err != DB_SUCCESS) { if (err != DB_SUCCESS) {
ib::warn() << " Error code: " << err ib::warn() << " Error code: " << err
@@ -1005,7 +1007,7 @@ static void btr_free_root(buf_block_t* block, mtr_t* mtr, bool invalidate)
BTR_FREED_INDEX_ID, mtr); BTR_FREED_INDEX_ID, mtr);
} }
while (!fseg_free_step(header, true, mtr)) { while (!fseg_free_step(header, mtr)) {
/* Free the entire segment in small steps. */ /* Free the entire segment in small steps. */
} }
} }
@@ -1254,7 +1256,7 @@ leaf_loop:
fsp0fsp. */ fsp0fsp. */
finished = fseg_free_step(root + PAGE_HEADER + PAGE_BTR_SEG_LEAF, finished = fseg_free_step(root + PAGE_HEADER + PAGE_BTR_SEG_LEAF,
true, &mtr); &mtr);
mtr_commit(&mtr); mtr_commit(&mtr);
if (!finished) { if (!finished) {
@@ -1274,7 +1276,7 @@ top_loop:
#endif /* UNIV_BTR_DEBUG */ #endif /* UNIV_BTR_DEBUG */
finished = fseg_free_step_not_header( finished = fseg_free_step_not_header(
root + PAGE_HEADER + PAGE_BTR_SEG_TOP, true, &mtr); root + PAGE_HEADER + PAGE_BTR_SEG_TOP, &mtr);
mtr_commit(&mtr); mtr_commit(&mtr);
if (!finished) { if (!finished) {
@@ -2342,7 +2344,7 @@ btr_insert_on_non_leaf_level_func(
dberr_t err = btr_cur_search_to_nth_level( dberr_t err = btr_cur_search_to_nth_level(
index, level, tuple, PAGE_CUR_LE, index, level, tuple, PAGE_CUR_LE,
BTR_CONT_MODIFY_TREE, BTR_CONT_MODIFY_TREE,
&cursor, 0, file, line, mtr); &cursor, 0, file, line, mtr, 0);
if (err != DB_SUCCESS) { if (err != DB_SUCCESS) {
ib::warn() << " Error code: " << err ib::warn() << " Error code: " << err
@@ -2363,7 +2365,7 @@ btr_insert_on_non_leaf_level_func(
btr_cur_search_to_nth_level(index, level, tuple, btr_cur_search_to_nth_level(index, level, tuple,
PAGE_CUR_RTREE_INSERT, PAGE_CUR_RTREE_INSERT,
BTR_CONT_MODIFY_TREE, BTR_CONT_MODIFY_TREE,
&cursor, 0, file, line, mtr); &cursor, 0, file, line, mtr, 0);
} }
ut_ad(cursor.flag == BTR_CUR_BINARY); ut_ad(cursor.flag == BTR_CUR_BINARY);

View File

@@ -864,8 +864,7 @@ search tuple should be performed in the B-tree. InnoDB does an insert
immediately after the cursor. Thus, the cursor may end up on a user record, immediately after the cursor. Thus, the cursor may end up on a user record,
or on a page infimum record. */ or on a page infimum record. */
dberr_t dberr_t
btr_cur_search_to_nth_level( btr_cur_search_to_nth_level_func(
/*========================*/
dict_index_t* index, /*!< in: index */ dict_index_t* index, /*!< in: index */
ulint level, /*!< in: the tree level of search */ ulint level, /*!< in: the tree level of search */
const dtuple_t* tuple, /*!< in: data tuple; NOTE: n_fields_cmp in const dtuple_t* tuple, /*!< in: data tuple; NOTE: n_fields_cmp in
@@ -887,10 +886,12 @@ btr_cur_search_to_nth_level(
to protect the record! */ to protect the record! */
btr_cur_t* cursor, /*!< in/out: tree cursor; the cursor page is btr_cur_t* cursor, /*!< in/out: tree cursor; the cursor page is
s- or x-latched, but see also above! */ s- or x-latched, but see also above! */
#ifdef BTR_CUR_HASH_ADAPT
ulint has_search_latch, ulint has_search_latch,
/*!< in: info on the latch mode the /*!< in: info on the latch mode the
caller currently has on search system: caller currently has on search system:
RW_S_LATCH, or 0 */ RW_S_LATCH, or 0 */
#endif /* BTR_CUR_HASH_ADAPT */
const char* file, /*!< in: file name */ const char* file, /*!< in: file name */
unsigned line, /*!< in: line where called */ unsigned line, /*!< in: line where called */
mtr_t* mtr, /*!< in: mtr */ mtr_t* mtr, /*!< in: mtr */
@@ -1053,6 +1054,7 @@ btr_cur_search_to_nth_level(
} }
#ifdef BTR_CUR_HASH_ADAPT #ifdef BTR_CUR_HASH_ADAPT
rw_lock_t* const search_latch = btr_get_search_latch(index);
# ifdef UNIV_SEARCH_PERF_STAT # ifdef UNIV_SEARCH_PERF_STAT
info->n_searches++; info->n_searches++;
@@ -1073,8 +1075,7 @@ btr_cur_search_to_nth_level(
will have to check it again. */ will have to check it again. */
&& btr_search_enabled && btr_search_enabled
&& !modify_external && !modify_external
&& rw_lock_get_writer(btr_get_search_latch(index)) && rw_lock_get_writer(search_latch) == RW_LOCK_NOT_LOCKED
== RW_LOCK_NOT_LOCKED
&& btr_search_guess_on_hash(index, info, tuple, mode, && btr_search_guess_on_hash(index, info, tuple, mode,
latch_mode, cursor, latch_mode, cursor,
has_search_latch, mtr)) { has_search_latch, mtr)) {
@@ -1098,10 +1099,12 @@ btr_cur_search_to_nth_level(
/* If the hash search did not succeed, do binary search down the /* If the hash search did not succeed, do binary search down the
tree */ tree */
#ifdef BTR_CUR_HASH_ADAPT
if (has_search_latch) { if (has_search_latch) {
/* Release possible search latch to obey latching order */ /* Release possible search latch to obey latching order */
btr_search_s_unlock(index); rw_lock_s_unlock(search_latch);
} }
#endif /* BTR_CUR_HASH_ADAPT */
/* Store the position of the tree latch we push to mtr so that we /* Store the position of the tree latch we push to mtr so that we
know how to release it when we have latched leaf node(s) */ know how to release it when we have latched leaf node(s) */
@@ -2155,9 +2158,11 @@ func_exit:
ut_free(prev_tree_savepoints); ut_free(prev_tree_savepoints);
} }
#ifdef BTR_CUR_HASH_ADAPT
if (has_search_latch) { if (has_search_latch) {
btr_search_s_lock(index); rw_lock_s_lock(search_latch);
} }
#endif /* BTR_CUR_HASH_ADAPT */
if (mbr_adj) { if (mbr_adj) {
/* remember that we will need to adjust parent MBR */ /* remember that we will need to adjust parent MBR */
@@ -5746,7 +5751,7 @@ btr_estimate_n_rows_in_range_low(
btr_cur_search_to_nth_level(index, 0, tuple1, mode1, btr_cur_search_to_nth_level(index, 0, tuple1, mode1,
BTR_SEARCH_LEAF | BTR_ESTIMATE, BTR_SEARCH_LEAF | BTR_ESTIMATE,
&cursor, 0, &cursor, 0,
__FILE__, __LINE__, &mtr); __FILE__, __LINE__, &mtr, 0);
ut_ad(!page_rec_is_infimum(btr_cur_get_rec(&cursor))); ut_ad(!page_rec_is_infimum(btr_cur_get_rec(&cursor)));
@@ -5800,7 +5805,7 @@ btr_estimate_n_rows_in_range_low(
btr_cur_search_to_nth_level(index, 0, tuple2, mode2, btr_cur_search_to_nth_level(index, 0, tuple2, mode2,
BTR_SEARCH_LEAF | BTR_ESTIMATE, BTR_SEARCH_LEAF | BTR_ESTIMATE,
&cursor, 0, &cursor, 0,
__FILE__, __LINE__, &mtr); __FILE__, __LINE__, &mtr, 0);
const rec_t* rec = btr_cur_get_rec(&cursor); const rec_t* rec = btr_cur_get_rec(&cursor);

View File

@@ -1,7 +1,7 @@
/***************************************************************************** /*****************************************************************************
Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2016, 2019, MariaDB Corporation. Copyright (c) 2016, 2020, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software the terms of the GNU General Public License as published by the Free Software
@@ -353,8 +353,11 @@ btr_pcur_restore_position_func(
mode = PAGE_CUR_UNSUPP; mode = PAGE_CUR_UNSUPP;
} }
btr_pcur_open_with_no_init_func(index, tuple, mode, latch_mode, btr_pcur_open_with_no_init_func(index, tuple, mode, latch_mode, cursor,
cursor, 0, file, line, mtr); #ifdef BTR_CUR_HASH_ADAPT
0,
#endif /* BTR_CUR_HASH_ADAPT */
file, line, mtr);
/* Restore the old search mode */ /* Restore the old search mode */
cursor->search_mode = old_mode; cursor->search_mode = old_mode;

View File

@@ -2,7 +2,7 @@
Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2008, Google Inc. Copyright (c) 2008, Google Inc.
Copyright (c) 2017, 2019, MariaDB Corporation. Copyright (c) 2017, 2020, MariaDB Corporation.
Portions of this file contain modifications contributed and copyrighted by Portions of this file contain modifications contributed and copyrighted by
Google, Inc. Those modifications are gratefully acknowledged and are described Google, Inc. Those modifications are gratefully acknowledged and are described
@@ -141,15 +141,8 @@ static
void void
btr_search_check_free_space_in_heap(dict_index_t* index) btr_search_check_free_space_in_heap(dict_index_t* index)
{ {
hash_table_t* table; hash_table_t* table = btr_get_search_table(index);
mem_heap_t* heap; mem_heap_t* heap = table->heap;
ut_ad(!rw_lock_own(btr_get_search_latch(index), RW_LOCK_S));
ut_ad(!rw_lock_own(btr_get_search_latch(index), RW_LOCK_X));
table = btr_get_search_table(index);
heap = table->heap;
/* Note that we peek the value of heap->free_block without reserving /* Note that we peek the value of heap->free_block without reserving
the latch: this is ok, because we will not guarantee that there will the latch: this is ok, because we will not guarantee that there will
@@ -157,8 +150,9 @@ btr_search_check_free_space_in_heap(dict_index_t* index)
if (heap->free_block == NULL) { if (heap->free_block == NULL) {
buf_block_t* block = buf_block_alloc(NULL); buf_block_t* block = buf_block_alloc(NULL);
rw_lock_t* latch = btr_get_search_latch(index);
btr_search_x_lock(index); rw_lock_x_lock(latch);
if (btr_search_enabled if (btr_search_enabled
&& heap->free_block == NULL) { && heap->free_block == NULL) {
@@ -167,7 +161,7 @@ btr_search_check_free_space_in_heap(dict_index_t* index)
buf_block_free(block); buf_block_free(block);
} }
btr_search_x_unlock(index); rw_lock_x_unlock(latch);
} }
} }
@@ -289,18 +283,88 @@ btr_search_disable_ref_count(
{ {
dict_index_t* index; dict_index_t* index;
ut_ad(mutex_own(&dict_sys->mutex));
for (index = dict_table_get_first_index(table); for (index = dict_table_get_first_index(table);
index != NULL; index != NULL;
index = dict_table_get_next_index(index)) { index = dict_table_get_next_index(index)) {
ut_ad(rw_lock_own(btr_get_search_latch(index), RW_LOCK_X));
index->search_info->ref_count = 0; index->search_info->ref_count = 0;
} }
} }
/** Lazily free detached metadata when removing the last reference. */
ATTRIBUTE_COLD static void btr_search_lazy_free(dict_index_t *index)
{
ut_ad(index->freed());
dict_table_t *table= index->table;
/* Perform the skipped steps of dict_index_remove_from_cache_low(). */
UT_LIST_REMOVE(table->freed_indexes, index);
rw_lock_free(&index->lock);
dict_mem_index_free(index);
if (!UT_LIST_GET_LEN(table->freed_indexes) &&
!UT_LIST_GET_LEN(table->indexes))
{
ut_ad(table->id == 0);
dict_mem_table_free(table);
}
}
/** Clear the adaptive hash index on all pages in the buffer pool. */
static void buf_pool_clear_hash_index()
{
ut_ad(btr_search_own_all(RW_LOCK_X));
ut_ad(!btr_search_enabled);
std::set<dict_index_t*> garbage;
for (ulong p = 0; p < srv_buf_pool_instances; p++)
{
buf_pool_t *buf_pool= buf_pool_from_array(p);
buf_chunk_t *chunks= buf_pool->chunks;
buf_chunk_t *chunk= chunks + buf_pool->n_chunks;
while (--chunk >= chunks)
{
buf_block_t *block= chunk->blocks;
for (ulint i= chunk->size; i--; block++)
{
dict_index_t *index= block->index;
assert_block_ahi_valid(block);
/* We can clear block->index and block->n_pointers when
btr_search_own_all(RW_LOCK_X); see the comments in buf0buf.h */
if (!index)
{
# if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
ut_a(!block->n_pointers);
# endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
continue;
}
ut_d(buf_page_state state= buf_block_get_state(block));
/* Another thread may have set the state to
BUF_BLOCK_REMOVE_HASH in buf_LRU_block_remove_hashed().
The state change in buf_page_realloc() is not observable here,
because in that case we would have !block->index.
In the end, the entire adaptive hash index will be removed. */
ut_ad(state == BUF_BLOCK_FILE_PAGE || state == BUF_BLOCK_REMOVE_HASH);
# if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
block->n_pointers= 0;
# endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
if (index->freed())
garbage.insert(index);
block->index= NULL;
}
}
}
for (std::set<dict_index_t*>::iterator i= garbage.begin();
i != garbage.end(); i++)
btr_search_lazy_free(*i);
}
/** Disable the adaptive hash search system and empty the index. /** Disable the adaptive hash search system and empty the index.
@param[in] need_mutex need to acquire dict_sys->mutex */ @param[in] need_mutex need to acquire dict_sys->mutex */
void void
@@ -373,33 +437,6 @@ btr_search_enable()
btr_search_x_unlock_all(); btr_search_x_unlock_all();
} }
/** Returns the value of ref_count. The value is protected by latch.
@param[in] info search info
@param[in] index index identifier
@return ref_count value. */
ulint
btr_search_info_get_ref_count(
btr_search_t* info,
dict_index_t* index)
{
ulint ret = 0;
if (!btr_search_enabled) {
return(ret);
}
ut_ad(info);
ut_ad(!rw_lock_own(btr_get_search_latch(index), RW_LOCK_S));
ut_ad(!rw_lock_own(btr_get_search_latch(index), RW_LOCK_X));
btr_search_s_lock(index);
ret = info->ref_count;
btr_search_s_unlock(index);
return(ret);
}
/** Updates the search info of an index about hash successes. NOTE that info /** Updates the search info of an index about hash successes. NOTE that info
is NOT protected by any semaphore, to save CPU time! Do not assume its fields is NOT protected by any semaphore, to save CPU time! Do not assume its fields
are consistent. are consistent.
@@ -415,8 +452,8 @@ btr_search_info_update_hash(
ulint n_unique; ulint n_unique;
int cmp; int cmp;
ut_ad(!rw_lock_own(btr_get_search_latch(index), RW_LOCK_S)); ut_ad(!rw_lock_own_flagged(btr_get_search_latch(index),
ut_ad(!rw_lock_own(btr_get_search_latch(index), RW_LOCK_X)); RW_LOCK_FLAG_X | RW_LOCK_FLAG_S));
if (dict_index_is_ibuf(index)) { if (dict_index_is_ibuf(index)) {
/* So many deletes are performed on an insert buffer tree /* So many deletes are performed on an insert buffer tree
@@ -601,28 +638,25 @@ btr_search_update_hash_ref(
buf_block_t* block, buf_block_t* block,
const btr_cur_t* cursor) const btr_cur_t* cursor)
{ {
dict_index_t* index;
ulint fold;
rec_t* rec;
ut_ad(cursor->flag == BTR_CUR_HASH_FAIL); ut_ad(cursor->flag == BTR_CUR_HASH_FAIL);
ut_ad(rw_lock_own(btr_get_search_latch(cursor->index), RW_LOCK_X));
ut_ad(rw_lock_own_flagged(&block->lock, ut_ad(rw_lock_own_flagged(&block->lock,
RW_LOCK_FLAG_X | RW_LOCK_FLAG_S)); RW_LOCK_FLAG_X | RW_LOCK_FLAG_S));
ut_ad(page_align(btr_cur_get_rec(cursor)) == block->frame); ut_ad(page_align(btr_cur_get_rec(cursor)) == block->frame);
ut_ad(page_is_leaf(block->frame)); ut_ad(page_is_leaf(block->frame));
assert_block_ahi_valid(block); assert_block_ahi_valid(block);
index = block->index; dict_index_t* index = block->index;
if (!index) { if (!index) {
return; return;
} }
ut_ad(block->page.id.space() == index->space); ut_ad(block->page.id.space() == index->space);
ut_a(index == cursor->index); ut_a(index == cursor->index);
ut_a(!dict_index_is_ibuf(index)); ut_ad(!dict_index_is_ibuf(index));
rw_lock_t* const latch = btr_get_search_latch(index);
rw_lock_x_lock(latch);
if ((info->n_hash_potential > 0) if ((info->n_hash_potential > 0)
&& (block->curr_n_fields == info->n_fields) && (block->curr_n_fields == info->n_fields)
@@ -632,14 +666,14 @@ btr_search_update_hash_ref(
rec_offs offsets_[REC_OFFS_NORMAL_SIZE]; rec_offs offsets_[REC_OFFS_NORMAL_SIZE];
rec_offs_init(offsets_); rec_offs_init(offsets_);
rec = btr_cur_get_rec(cursor); const rec_t* rec = btr_cur_get_rec(cursor);
if (!page_rec_is_user_rec(rec)) { if (!page_rec_is_user_rec(rec)) {
goto func_exit;
return;
} }
fold = rec_fold(rec, ulint fold = rec_fold(
rec,
rec_get_offsets(rec, index, offsets_, true, rec_get_offsets(rec, index, offsets_, true,
ULINT_UNDEFINED, &heap), ULINT_UNDEFINED, &heap),
block->curr_n_fields, block->curr_n_fields,
@@ -647,13 +681,15 @@ btr_search_update_hash_ref(
if (UNIV_LIKELY_NULL(heap)) { if (UNIV_LIKELY_NULL(heap)) {
mem_heap_free(heap); mem_heap_free(heap);
} }
ut_ad(rw_lock_own(btr_get_search_latch(index), RW_LOCK_X));
ha_insert_for_fold(btr_get_search_table(index), fold, ha_insert_for_fold(btr_get_search_table(index), fold,
block, rec); block, rec);
MONITOR_INC(MONITOR_ADAPTIVE_HASH_ROW_ADDED); MONITOR_INC(MONITOR_ADAPTIVE_HASH_ROW_ADDED);
} }
func_exit:
rw_lock_x_unlock(latch);
} }
/** Updates the search info. /** Updates the search info.
@@ -692,12 +728,7 @@ btr_search_info_update_slow(
#ifdef UNIV_SEARCH_PERF_STAT #ifdef UNIV_SEARCH_PERF_STAT
btr_search_n_hash_fail++; btr_search_n_hash_fail++;
#endif /* UNIV_SEARCH_PERF_STAT */ #endif /* UNIV_SEARCH_PERF_STAT */
btr_search_x_lock(cursor->index);
btr_search_update_hash_ref(info, block, cursor); btr_search_update_hash_ref(info, block, cursor);
btr_search_x_unlock(cursor->index);
} }
if (build_index) { if (build_index) {
@@ -898,7 +929,6 @@ btr_search_guess_on_hash(
ulint has_search_latch, ulint has_search_latch,
mtr_t* mtr) mtr_t* mtr)
{ {
const rec_t* rec;
ulint fold; ulint fold;
index_id_t index_id; index_id_t index_id;
#ifdef notdefined #ifdef notdefined
@@ -944,33 +974,28 @@ btr_search_guess_on_hash(
cursor->fold = fold; cursor->fold = fold;
cursor->flag = BTR_CUR_HASH; cursor->flag = BTR_CUR_HASH;
rw_lock_t* const latch = btr_get_search_latch(index);
if (!has_search_latch) { if (!has_search_latch) {
btr_search_s_lock(index); rw_lock_s_lock(latch);
if (!btr_search_enabled) { if (!btr_search_enabled) {
btr_search_s_unlock(index); fail:
btr_search_failure(info, cursor);
return(FALSE);
}
}
ut_ad(rw_lock_get_writer(btr_get_search_latch(index)) != RW_LOCK_X);
ut_ad(rw_lock_get_reader_count(btr_get_search_latch(index)) > 0);
rec = (rec_t*) ha_search_and_get_data(
btr_get_search_table(index), fold);
if (rec == NULL) {
if (!has_search_latch) { if (!has_search_latch) {
btr_search_s_unlock(index); rw_lock_s_unlock(latch);
}
btr_search_failure(info, cursor);
return(FALSE);
}
} }
btr_search_failure(info, cursor); ut_ad(rw_lock_own(latch, RW_LOCK_S));
return(FALSE); const rec_t* rec = static_cast<const rec_t*>(
ha_search_and_get_data(btr_get_search_table(index), fold));
if (!rec) {
goto fail;
} }
buf_block_t* block = buf_block_from_ahi(rec); buf_block_t* block = buf_block_from_ahi(rec);
@@ -980,32 +1005,34 @@ btr_search_guess_on_hash(
if (!buf_page_get_known_nowait( if (!buf_page_get_known_nowait(
latch_mode, block, BUF_MAKE_YOUNG, latch_mode, block, BUF_MAKE_YOUNG,
__FILE__, __LINE__, mtr)) { __FILE__, __LINE__, mtr)) {
goto fail;
if (!has_search_latch) {
btr_search_s_unlock(index);
} }
btr_search_failure(info, cursor); const bool fail = index != block->index
&& index_id == block->index->id;
return(FALSE); ut_a(!fail || block->index->freed());
} rw_lock_s_unlock(latch);
btr_search_s_unlock(index);
buf_block_dbg_add_level(block, SYNC_TREE_NODE_FROM_HASH); buf_block_dbg_add_level(block, SYNC_TREE_NODE_FROM_HASH);
if (UNIV_UNLIKELY(fail)) {
btr_search_drop_page_hash_index(block);
goto fail_and_release_page;
}
} else if (UNIV_UNLIKELY(index != block->index
&& index_id == block->index->id)) {
ut_a(block->index->freed());
goto fail_and_release_page;
} }
if (buf_block_get_state(block) != BUF_BLOCK_FILE_PAGE) { if (buf_block_get_state(block) != BUF_BLOCK_FILE_PAGE) {
ut_ad(buf_block_get_state(block) == BUF_BLOCK_REMOVE_HASH); ut_ad(buf_block_get_state(block) == BUF_BLOCK_REMOVE_HASH);
fail_and_release_page:
if (!has_search_latch) { if (!has_search_latch) {
btr_leaf_page_release(block, latch_mode, mtr); btr_leaf_page_release(block, latch_mode, mtr);
} }
btr_search_failure(info, cursor); btr_search_failure(info, cursor);
return(FALSE); return(FALSE);
} }
@@ -1024,14 +1051,7 @@ btr_search_guess_on_hash(
|| !btr_search_check_guess(cursor, || !btr_search_check_guess(cursor,
has_search_latch, has_search_latch,
tuple, mode, mtr)) { tuple, mode, mtr)) {
goto fail_and_release_page;
if (!has_search_latch) {
btr_leaf_page_release(block, latch_mode, mtr);
}
btr_search_failure(info, cursor);
return(FALSE);
} }
if (info->n_hash_potential < BTR_SEARCH_BUILD_LIMIT + 5) { if (info->n_hash_potential < BTR_SEARCH_BUILD_LIMIT + 5) {
@@ -1115,30 +1135,26 @@ btr_search_drop_page_hash_index(buf_block_t* block)
ulint* folds; ulint* folds;
ulint i; ulint i;
mem_heap_t* heap; mem_heap_t* heap;
const dict_index_t* index;
rec_offs* offsets; rec_offs* offsets;
rw_lock_t* latch; rw_lock_t* latch;
btr_search_t* info;
retry: retry:
/* Do a dirty check on block->index, return if the block is
not in the adaptive hash index. */
index = block->index;
/* This debug check uses a dirty read that could theoretically cause /* This debug check uses a dirty read that could theoretically cause
false positives while buf_pool_clear_hash_index() is executing. */ false positives while buf_pool_clear_hash_index() is executing. */
assert_block_ahi_valid(block); assert_block_ahi_valid(block);
if (index == NULL) { if (!block->index) {
return; return;
} }
ut_ad(block->page.buf_fix_count == 0 ut_ad(block->page.buf_fix_count == 0
|| buf_block_get_state(block) == BUF_BLOCK_REMOVE_HASH || buf_block_get_state(block) == BUF_BLOCK_REMOVE_HASH
|| rw_lock_own_flagged(&block->lock, || rw_lock_own_flagged(&block->lock,
RW_LOCK_FLAG_X | RW_LOCK_FLAG_S)); RW_LOCK_FLAG_X | RW_LOCK_FLAG_S
| RW_LOCK_FLAG_SX));
ut_ad(page_is_leaf(block->frame)); ut_ad(page_is_leaf(block->frame));
/* We must not dereference index here, because it could be freed /* We must not dereference block->index here, because it could be freed
if (index->table->n_ref_count == 0 && !mutex_own(&dict_sys->mutex)). if (index->table->n_ref_count == 0 && !mutex_own(&dict_sys->mutex)).
Determine the ahi_slot based on the block contents. */ Determine the ahi_slot based on the block contents. */
@@ -1156,18 +1172,12 @@ retry:
rw_lock_s_lock(latch); rw_lock_s_lock(latch);
assert_block_ahi_valid(block); assert_block_ahi_valid(block);
if (block->index == NULL) { if (!block->index) {
rw_lock_s_unlock(latch); rw_lock_s_unlock(latch);
return; return;
} }
/* The index associated with a block must remain the dict_index_t* index = block->index;
same, because we are holding block->lock or the block is
not accessible by other threads (BUF_BLOCK_REMOVE_HASH),
or the index is not accessible to other threads
(buf_fix_count == 0 when DROP TABLE or similar is executing
buf_LRU_drop_page_hash_for_tablespace()). */
ut_a(index == block->index);
#ifdef MYSQL_INDEX_DISABLE_AHI #ifdef MYSQL_INDEX_DISABLE_AHI
ut_ad(!index->disable_ahi); ut_ad(!index->disable_ahi);
#endif #endif
@@ -1176,7 +1186,7 @@ retry:
ut_ad(index->space == FIL_NULL ut_ad(index->space == FIL_NULL
|| block->page.id.space() == index->space); || block->page.id.space() == index->space);
ut_a(index_id == index->id); ut_a(index_id == index->id);
ut_a(!dict_index_is_ibuf(index)); ut_ad(!dict_index_is_ibuf(index));
#ifdef UNIV_DEBUG #ifdef UNIV_DEBUG
switch (dict_index_get_online_status(index)) { switch (dict_index_get_online_status(index)) {
case ONLINE_INDEX_CREATION: case ONLINE_INDEX_CREATION:
@@ -1281,9 +1291,14 @@ next_rec:
folds[i], page); folds[i], page);
} }
info = btr_search_get_info(block->index); switch (index->search_info->ref_count--) {
ut_a(info->ref_count > 0); case 0:
info->ref_count--; ut_error;
case 1:
if (index->freed()) {
btr_search_lazy_free(index);
}
}
block->index = NULL; block->index = NULL;
@@ -1383,27 +1398,27 @@ btr_search_build_page_hash_index(
rec_offs_init(offsets_); rec_offs_init(offsets_);
ut_ad(index); ut_ad(index);
ut_ad(block->page.id.space() == index->space); ut_ad(block->page.id.space() == index->space);
ut_a(!dict_index_is_ibuf(index)); ut_ad(!dict_index_is_ibuf(index));
ut_ad(page_is_leaf(block->frame)); ut_ad(page_is_leaf(block->frame));
ut_ad(!rw_lock_own(btr_get_search_latch(index), RW_LOCK_X));
ut_ad(rw_lock_own_flagged(&block->lock, ut_ad(rw_lock_own_flagged(&block->lock,
RW_LOCK_FLAG_X | RW_LOCK_FLAG_S)); RW_LOCK_FLAG_X | RW_LOCK_FLAG_S));
ut_ad(block->page.id.page_no() >= 3);
btr_search_s_lock(index); rw_lock_t* const latch = btr_get_search_latch(index);
rw_lock_s_lock(latch);
table = btr_get_search_table(index); table = btr_get_search_table(index);
page = buf_block_get_frame(block); page = buf_block_get_frame(block);
if (block->index && ((block->curr_n_fields != n_fields) const bool must_drop = block->index
&& ((block->curr_n_fields != n_fields)
|| (block->curr_n_bytes != n_bytes) || (block->curr_n_bytes != n_bytes)
|| (block->curr_left_side != left_side))) { || (block->curr_left_side != left_side));
rw_lock_s_unlock(latch);
btr_search_s_unlock(index);
if (must_drop) {
btr_search_drop_page_hash_index(block); btr_search_drop_page_hash_index(block);
} else {
btr_search_s_unlock(index);
} }
/* Check that the values for hash index build are sensible */ /* Check that the values for hash index build are sensible */
@@ -1495,7 +1510,7 @@ btr_search_build_page_hash_index(
btr_search_check_free_space_in_heap(index); btr_search_check_free_space_in_heap(index);
btr_search_x_lock(index); rw_lock_x_lock(latch);
if (!btr_search_enabled) { if (!btr_search_enabled) {
goto exit_func; goto exit_func;
@@ -1533,7 +1548,7 @@ btr_search_build_page_hash_index(
MONITOR_INC_VALUE(MONITOR_ADAPTIVE_HASH_ROW_ADDED, n_cached); MONITOR_INC_VALUE(MONITOR_ADAPTIVE_HASH_ROW_ADDED, n_cached);
exit_func: exit_func:
assert_block_ahi_valid(block); assert_block_ahi_valid(block);
btr_search_x_unlock(index); rw_lock_x_unlock(latch);
ut_free(folds); ut_free(folds);
ut_free(recs); ut_free(recs);
@@ -1566,21 +1581,19 @@ btr_search_move_or_delete_hash_entries(
ut_ad(rw_lock_own(&(block->lock), RW_LOCK_X)); ut_ad(rw_lock_own(&(block->lock), RW_LOCK_X));
ut_ad(rw_lock_own(&(new_block->lock), RW_LOCK_X)); ut_ad(rw_lock_own(&(new_block->lock), RW_LOCK_X));
btr_search_s_lock(index); rw_lock_t* const latch = btr_get_search_latch(index);
rw_lock_s_lock(latch);
ut_a(!new_block->index || new_block->index == index); ut_a(!new_block->index || new_block->index == index);
ut_a(!block->index || block->index == index); ut_a(!block->index || block->index == index);
ut_a(!(new_block->index || block->index) ut_ad(!(new_block->index || block->index)
|| !dict_index_is_ibuf(index)); || !dict_index_is_ibuf(index));
assert_block_ahi_valid(block); assert_block_ahi_valid(block);
assert_block_ahi_valid(new_block); assert_block_ahi_valid(new_block);
if (new_block->index) { if (new_block->index) {
rw_lock_s_unlock(latch);
btr_search_s_unlock(index);
btr_search_drop_page_hash_index(block); btr_search_drop_page_hash_index(block);
return; return;
} }
@@ -1593,7 +1606,7 @@ btr_search_move_or_delete_hash_entries(
new_block->n_bytes = block->curr_n_bytes; new_block->n_bytes = block->curr_n_bytes;
new_block->left_side = left_side; new_block->left_side = left_side;
btr_search_s_unlock(index); rw_lock_s_unlock(latch);
ut_a(n_fields > 0 || n_bytes > 0); ut_a(n_fields > 0 || n_bytes > 0);
@@ -1605,7 +1618,7 @@ btr_search_move_or_delete_hash_entries(
return; return;
} }
btr_search_s_unlock(index); rw_lock_s_unlock(latch);
} }
/** Updates the page hash index when a single record is deleted from a page. /** Updates the page hash index when a single record is deleted from a page.
@@ -1647,7 +1660,7 @@ btr_search_update_hash_on_delete(btr_cur_t* cursor)
ut_ad(block->page.id.space() == index->space); ut_ad(block->page.id.space() == index->space);
ut_a(index == cursor->index); ut_a(index == cursor->index);
ut_a(block->curr_n_fields > 0 || block->curr_n_bytes > 0); ut_a(block->curr_n_fields > 0 || block->curr_n_bytes > 0);
ut_a(!dict_index_is_ibuf(index)); ut_ad(!dict_index_is_ibuf(index));
table = btr_get_search_table(index); table = btr_get_search_table(index);
@@ -1660,7 +1673,8 @@ btr_search_update_hash_on_delete(btr_cur_t* cursor)
mem_heap_free(heap); mem_heap_free(heap);
} }
btr_search_x_lock(index); rw_lock_t* const latch = btr_get_search_latch(index);
rw_lock_x_lock(latch);
assert_block_ahi_valid(block); assert_block_ahi_valid(block);
if (block->index) { if (block->index) {
@@ -1676,7 +1690,7 @@ btr_search_update_hash_on_delete(btr_cur_t* cursor)
assert_block_ahi_valid(block); assert_block_ahi_valid(block);
} }
btr_search_x_unlock(index); rw_lock_x_unlock(latch);
} }
/** Updates the page hash index when a single record is inserted on a page. /** Updates the page hash index when a single record is inserted on a page.
@@ -1712,9 +1726,10 @@ btr_search_update_hash_node_on_insert(btr_cur_t* cursor)
} }
ut_a(cursor->index == index); ut_a(cursor->index == index);
ut_a(!dict_index_is_ibuf(index)); ut_ad(!dict_index_is_ibuf(index));
btr_search_x_lock(index); rw_lock_t* const latch = btr_get_search_latch(index);
rw_lock_x_lock(latch);
if (!block->index) { if (!block->index) {
@@ -1738,10 +1753,9 @@ btr_search_update_hash_node_on_insert(btr_cur_t* cursor)
func_exit: func_exit:
assert_block_ahi_valid(block); assert_block_ahi_valid(block);
btr_search_x_unlock(index); rw_lock_x_unlock(latch);
} else { } else {
btr_search_x_unlock(index); rw_lock_x_unlock(latch);
btr_search_update_hash_on_insert(cursor); btr_search_update_hash_on_insert(cursor);
} }
} }
@@ -1765,8 +1779,6 @@ btr_search_update_hash_on_insert(btr_cur_t* cursor)
ulint next_fold = 0; /* remove warning (??? bug ???) */ ulint next_fold = 0; /* remove warning (??? bug ???) */
ulint n_fields; ulint n_fields;
ulint n_bytes; ulint n_bytes;
ibool left_side;
ibool locked = FALSE;
mem_heap_t* heap = NULL; mem_heap_t* heap = NULL;
rec_offs offsets_[REC_OFFS_NORMAL_SIZE]; rec_offs offsets_[REC_OFFS_NORMAL_SIZE];
rec_offs* offsets = offsets_; rec_offs* offsets = offsets_;
@@ -1803,11 +1815,11 @@ btr_search_update_hash_on_insert(btr_cur_t* cursor)
ut_a(!index->disable_ahi); ut_a(!index->disable_ahi);
#endif #endif
ut_a(index == cursor->index); ut_a(index == cursor->index);
ut_a(!dict_index_is_ibuf(index)); ut_ad(!dict_index_is_ibuf(index));
n_fields = block->curr_n_fields; n_fields = block->curr_n_fields;
n_bytes = block->curr_n_bytes; n_bytes = block->curr_n_bytes;
left_side = block->curr_left_side; const bool left_side = block->curr_left_side;
ins_rec = page_rec_get_next_const(rec); ins_rec = page_rec_get_next_const(rec);
next_rec = page_rec_get_next_const(ins_rec); next_rec = page_rec_get_next_const(ins_rec);
@@ -1824,17 +1836,18 @@ btr_search_update_hash_on_insert(btr_cur_t* cursor)
n_bytes, index->id); n_bytes, index->id);
} }
rw_lock_t* const latch = btr_get_search_latch(index);
bool locked = false;
if (!page_rec_is_infimum(rec)) { if (!page_rec_is_infimum(rec)) {
offsets = rec_get_offsets( offsets = rec_get_offsets(
rec, index, offsets, true, rec, index, offsets, true,
btr_search_get_n_fields(n_fields, n_bytes), &heap); btr_search_get_n_fields(n_fields, n_bytes), &heap);
fold = rec_fold(rec, offsets, n_fields, n_bytes, index->id); fold = rec_fold(rec, offsets, n_fields, n_bytes, index->id);
} else { } else {
if (left_side) { locked = left_side;
if (locked) {
btr_search_x_lock(index); rw_lock_x_lock(latch);
locked = TRUE;
if (!btr_search_enabled) { if (!btr_search_enabled) {
goto function_exit; goto function_exit;
@@ -1849,10 +1862,8 @@ btr_search_update_hash_on_insert(btr_cur_t* cursor)
if (fold != ins_fold) { if (fold != ins_fold) {
if (!locked) { if (!locked) {
locked = true;
btr_search_x_lock(index); rw_lock_x_lock(latch);
locked = TRUE;
if (!btr_search_enabled) { if (!btr_search_enabled) {
goto function_exit; goto function_exit;
@@ -1870,11 +1881,9 @@ check_next_rec:
if (page_rec_is_supremum(next_rec)) { if (page_rec_is_supremum(next_rec)) {
if (!left_side) { if (!left_side) {
if (!locked) { if (!locked) {
btr_search_x_lock(index); locked = true;
rw_lock_x_lock(latch);
locked = TRUE;
if (!btr_search_enabled) { if (!btr_search_enabled) {
goto function_exit; goto function_exit;
@@ -1888,12 +1897,9 @@ check_next_rec:
} }
if (ins_fold != next_fold) { if (ins_fold != next_fold) {
if (!locked) { if (!locked) {
locked = true;
btr_search_x_lock(index); rw_lock_x_lock(latch);
locked = TRUE;
if (!btr_search_enabled) { if (!btr_search_enabled) {
goto function_exit; goto function_exit;
@@ -1912,7 +1918,7 @@ function_exit:
mem_heap_free(heap); mem_heap_free(heap);
} }
if (locked) { if (locked) {
btr_search_x_unlock(index); rw_lock_x_unlock(latch);
} }
} }
@@ -2019,7 +2025,7 @@ btr_search_hash_table_validate(ulint hash_table_id)
== BUF_BLOCK_REMOVE_HASH); == BUF_BLOCK_REMOVE_HASH);
} }
ut_a(!dict_index_is_ibuf(block->index)); ut_ad(!dict_index_is_ibuf(block->index));
ut_ad(block->page.id.space() == block->index->space); ut_ad(block->page.id.space() == block->index->space);
page_index_id = btr_page_get_index_id(block->frame); page_index_id = btr_page_get_index_id(block->frame);

View File

@@ -3165,66 +3165,6 @@ DECLARE_THREAD(buf_resize_thread)(void*)
OS_THREAD_DUMMY_RETURN; OS_THREAD_DUMMY_RETURN;
} }
#ifdef BTR_CUR_HASH_ADAPT
/** Clear the adaptive hash index on all pages in the buffer pool. */
void
buf_pool_clear_hash_index()
{
ulint p;
ut_ad(btr_search_own_all(RW_LOCK_X));
ut_ad(!buf_pool_resizing);
ut_ad(!btr_search_enabled);
for (p = 0; p < srv_buf_pool_instances; p++) {
buf_pool_t* buf_pool = buf_pool_from_array(p);
buf_chunk_t* chunks = buf_pool->chunks;
buf_chunk_t* chunk = chunks + buf_pool->n_chunks;
while (--chunk >= chunks) {
buf_block_t* block = chunk->blocks;
ulint i = chunk->size;
for (; i--; block++) {
dict_index_t* index = block->index;
assert_block_ahi_valid(block);
/* We can set block->index = NULL
and block->n_pointers = 0
when btr_search_own_all(RW_LOCK_X);
see the comments in buf0buf.h */
if (!index) {
# if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
ut_a(!block->n_pointers);
# endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
continue;
}
ut_d(buf_page_state state
= buf_block_get_state(block));
/* Another thread may have set the
state to BUF_BLOCK_REMOVE_HASH in
buf_LRU_block_remove_hashed().
The state change in buf_page_realloc()
is not observable here, because in
that case we would have !block->index.
In the end, the entire adaptive hash
index will be removed. */
ut_ad(state == BUF_BLOCK_FILE_PAGE
|| state == BUF_BLOCK_REMOVE_HASH);
# if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
block->n_pointers = 0;
# endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
block->index = NULL;
}
}
}
}
#endif /* BTR_CUR_HASH_ADAPT */
/********************************************************************//** /********************************************************************//**
Relocate a buffer control block. Relocates the block on the LRU list Relocate a buffer control block. Relocates the block on the LRU list
and in buf_pool->page_hash. Does not relocate bpage->list. and in buf_pool->page_hash. Does not relocate bpage->list.
@@ -4207,7 +4147,7 @@ static buf_block_t* buf_page_mtr_lock(buf_block_t *block,
{ {
case RW_NO_LATCH: case RW_NO_LATCH:
fix_type= MTR_MEMO_BUF_FIX; fix_type= MTR_MEMO_BUF_FIX;
break; goto done;
case RW_S_LATCH: case RW_S_LATCH:
rw_lock_s_lock_inline(&block->lock, 0, file, line); rw_lock_s_lock_inline(&block->lock, 0, file, line);
fix_type= MTR_MEMO_PAGE_S_FIX; fix_type= MTR_MEMO_PAGE_S_FIX;
@@ -4223,6 +4163,15 @@ static buf_block_t* buf_page_mtr_lock(buf_block_t *block,
break; break;
} }
#ifdef BTR_CUR_HASH_ADAPT
{
dict_index_t *index= block->index;
if (index && index->freed())
btr_search_drop_page_hash_index(block);
}
#endif /* BTR_CUR_HASH_ADAPT */
done:
mtr_memo_push(mtr, block, fix_type); mtr_memo_push(mtr, block, fix_type);
return block; return block;
} }
@@ -4540,6 +4489,7 @@ evict_from_pool:
buf_pool_mutex_exit(buf_pool); buf_pool_mutex_exit(buf_pool);
return(NULL); return(NULL);
} }
break; break;
case BUF_BLOCK_ZIP_PAGE: case BUF_BLOCK_ZIP_PAGE:
@@ -5087,9 +5037,11 @@ buf_page_get_known_nowait(
buf_pool = buf_pool_from_block(block); buf_pool = buf_pool_from_block(block);
#ifdef BTR_CUR_HASH_ADAPT
if (mode == BUF_MAKE_YOUNG) { if (mode == BUF_MAKE_YOUNG) {
buf_page_make_young_if_needed(&block->page); buf_page_make_young_if_needed(&block->page);
} }
#endif /* BTR_CUR_HASH_ADAPT */
ut_ad(!ibuf_inside(mtr) || mode == BUF_KEEP_OLD); ut_ad(!ibuf_inside(mtr) || mode == BUF_KEEP_OLD);
@@ -5132,9 +5084,12 @@ buf_page_get_known_nowait(
deleting a record from SYS_INDEXES. This check will be deleting a record from SYS_INDEXES. This check will be
skipped in recv_recover_page() as well. */ skipped in recv_recover_page() as well. */
buf_page_mutex_enter(block); # ifdef BTR_CUR_HASH_ADAPT
ut_a(!block->page.file_page_was_freed); ut_ad(!block->page.file_page_was_freed
buf_page_mutex_exit(block); || (block->index && block->index->freed()));
# else /* BTR_CUR_HASH_ADAPT */
ut_ad(!block->page.file_page_was_freed);
# endif /* BTR_CUR_HASH_ADAPT */
} }
#endif /* UNIV_DEBUG */ #endif /* UNIV_DEBUG */
@@ -5627,6 +5582,12 @@ buf_page_create(
rw_lock_x_unlock(hash_lock); rw_lock_x_unlock(hash_lock);
buf_block_free(free_block); buf_block_free(free_block);
#ifdef BTR_CUR_HASH_ADAPT
if (block->page.state == BUF_BLOCK_FILE_PAGE
&& UNIV_LIKELY_NULL(block->index)) {
btr_search_drop_page_hash_index(block);
}
#endif /* BTR_CUR_HASH_ADAPT */
if (!recv_recovery_is_on()) { if (!recv_recovery_is_on()) {
return buf_page_get_with_no_latch(page_id, page_size, return buf_page_get_with_no_latch(page_id, page_size,

View File

@@ -222,166 +222,6 @@ buf_LRU_evict_from_unzip_LRU(
} }
#ifdef BTR_CUR_HASH_ADAPT #ifdef BTR_CUR_HASH_ADAPT
/** Attempts to drop page hash index on a batch of pages belonging to a
particular space id.
@param[in] space_id space id
@param[in] arr array of page_no
@param[in] count number of entries in array */
static
void
buf_LRU_drop_page_hash_batch(ulint space_id, const ulint* arr, ulint count)
{
ut_ad(count <= BUF_LRU_DROP_SEARCH_SIZE);
for (const ulint* const end = arr + count; arr != end; ) {
/* While our only caller
buf_LRU_drop_page_hash_for_tablespace()
is being executed for DROP TABLE or similar,
the table cannot be evicted from the buffer pool. */
btr_search_drop_page_hash_when_freed(
page_id_t(space_id, *arr++));
}
}
/******************************************************************//**
When doing a DROP TABLE/DISCARD TABLESPACE we have to drop all page
hash index entries belonging to that table. This function tries to
do that in batch. Note that this is a 'best effort' attempt and does
not guarantee that ALL hash entries will be removed. */
static
void
buf_LRU_drop_page_hash_for_tablespace(
/*==================================*/
buf_pool_t* buf_pool, /*!< in: buffer pool instance */
ulint id) /*!< in: space id */
{
ulint* page_arr = static_cast<ulint*>(ut_malloc_nokey(
sizeof(ulint) * BUF_LRU_DROP_SEARCH_SIZE));
ulint num_entries = 0;
buf_pool_mutex_enter(buf_pool);
scan_again:
for (buf_page_t* bpage = UT_LIST_GET_LAST(buf_pool->LRU);
bpage != NULL;
/* No op */) {
buf_page_t* prev_bpage = UT_LIST_GET_PREV(LRU, bpage);
ut_a(buf_page_in_file(bpage));
if (buf_page_get_state(bpage) != BUF_BLOCK_FILE_PAGE
|| bpage->id.space() != id
|| bpage->io_fix != BUF_IO_NONE) {
/* Compressed pages are never hashed.
Skip blocks of other tablespaces.
Skip I/O-fixed blocks (to be dealt with later). */
next_page:
bpage = prev_bpage;
continue;
}
buf_block_t* block = reinterpret_cast<buf_block_t*>(bpage);
mutex_enter(&block->mutex);
/* This debug check uses a dirty read that could
theoretically cause false positives while
buf_pool_clear_hash_index() is executing.
(Other conflicting access paths to the adaptive hash
index should not be possible, because when a
tablespace is being discarded or dropped, there must
be no concurrect access to the contained tables.) */
assert_block_ahi_valid(block);
bool skip = bpage->buf_fix_count > 0 || !block->index;
mutex_exit(&block->mutex);
if (skip) {
/* Skip this block, because there are
no adaptive hash index entries
pointing to it, or because we cannot
drop them due to the buffer-fix. */
goto next_page;
}
/* Store the page number so that we can drop the hash
index in a batch later. */
page_arr[num_entries] = bpage->id.page_no();
ut_a(num_entries < BUF_LRU_DROP_SEARCH_SIZE);
++num_entries;
if (num_entries < BUF_LRU_DROP_SEARCH_SIZE) {
goto next_page;
}
/* Array full. We release the buf_pool->mutex to obey
the latching order. */
buf_pool_mutex_exit(buf_pool);
buf_LRU_drop_page_hash_batch(id, page_arr, num_entries);
num_entries = 0;
buf_pool_mutex_enter(buf_pool);
/* Note that we released the buf_pool mutex above
after reading the prev_bpage during processing of a
page_hash_batch (i.e.: when the array was full).
Because prev_bpage could belong to a compressed-only
block, it may have been relocated, and thus the
pointer cannot be trusted. Because bpage is of type
buf_block_t, it is safe to dereference.
bpage can change in the LRU list. This is OK because
this function is a 'best effort' to drop as many
search hash entries as possible and it does not
guarantee that ALL such entries will be dropped. */
/* If, however, bpage has been removed from LRU list
to the free list then we should restart the scan.
bpage->state is protected by buf_pool mutex. */
if (bpage != NULL
&& buf_page_get_state(bpage) != BUF_BLOCK_FILE_PAGE) {
goto scan_again;
}
}
buf_pool_mutex_exit(buf_pool);
/* Drop any remaining batch of search hashed pages. */
buf_LRU_drop_page_hash_batch(id, page_arr, num_entries);
ut_free(page_arr);
}
/** Try to drop the adaptive hash index for a tablespace.
@param[in,out] table table
@return whether anything was dropped */
bool buf_LRU_drop_page_hash_for_tablespace(dict_table_t* table)
{
for (dict_index_t* index = dict_table_get_first_index(table);
index != NULL;
index = dict_table_get_next_index(index)) {
if (btr_search_info_get_ref_count(btr_search_get_info(index),
index)) {
goto drop_ahi;
}
}
return false;
drop_ahi:
ulint id = table->space;
for (ulint i = 0; i < srv_buf_pool_instances; i++) {
buf_LRU_drop_page_hash_for_tablespace(buf_pool_from_array(i),
id);
}
return true;
}
/******************************************************************//** /******************************************************************//**
While flushing (or removing dirty) pages from a tablespace we don't While flushing (or removing dirty) pages from a tablespace we don't
want to hog the CPU and resources. Release the buffer pool and block want to hog the CPU and resources. Release the buffer pool and block

View File

@@ -1,7 +1,7 @@
/***************************************************************************** /*****************************************************************************
Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2017, 2019, MariaDB Corporation. Copyright (c) 2017, 2020, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software the terms of the GNU General Public License as published by the Free Software
@@ -26,7 +26,9 @@ Created 1/8/1996 Heikki Tuuri
#include "dict0crea.h" #include "dict0crea.h"
#include "btr0pcur.h" #include "btr0pcur.h"
#include "btr0btr.h" #ifdef BTR_CUR_HASH_ADAPT
# include "btr0sea.h"
#endif /* BTR_CUR_HASH_ADAPT */
#include "page0page.h" #include "page0page.h"
#include "mach0data.h" #include "mach0data.h"
#include "dict0boot.h" #include "dict0boot.h"
@@ -1528,6 +1530,9 @@ dict_create_index_step(
&node->table->fts->cache->init_lock); &node->table->fts->cache->init_lock);
} }
#ifdef BTR_CUR_HASH_ADAPT
ut_ad(!node->index->search_info->ref_count);
#endif /* BTR_CUR_HASH_ADAPT */
dict_index_remove_from_cache(node->table, node->index); dict_index_remove_from_cache(node->table, node->index);
node->index = NULL; node->index = NULL;

View File

@@ -1361,25 +1361,12 @@ dict_table_can_be_evicted(
} }
#ifdef BTR_CUR_HASH_ADAPT #ifdef BTR_CUR_HASH_ADAPT
/* We cannot really evict the table if adaptive hash
index entries are pointing to any of its indexes. */
for (dict_index_t* index = dict_table_get_first_index(table); for (dict_index_t* index = dict_table_get_first_index(table);
index != NULL; index != NULL;
index = dict_table_get_next_index(index)) { index = dict_table_get_next_index(index)) {
if (index->n_ahi_pages()) {
btr_search_t* info = btr_search_get_info(index);
/* We are not allowed to free the in-memory index
struct dict_index_t until all entries in the adaptive
hash index that point to any of the page belonging to
his b-tree index are dropped. This is so because
dropping of these entries require access to
dict_index_t struct. To avoid such scenario we keep
a count of number of such pages in the search_info and
only free the dict_index_t struct when this count
drops to zero.
See also: dict_index_remove_from_cache_low() */
if (btr_search_info_get_ref_count(info, index) > 0) {
return(FALSE); return(FALSE);
} }
} }
@@ -1391,6 +1378,71 @@ dict_table_can_be_evicted(
return(FALSE); return(FALSE);
} }
#ifdef BTR_CUR_HASH_ADAPT
/** @return a clone of this */
dict_index_t *dict_index_t::clone() const
{
ut_ad(n_fields);
ut_ad(!(type & (DICT_IBUF | DICT_SPATIAL | DICT_FTS)));
ut_ad(online_status == ONLINE_INDEX_COMPLETE);
ut_ad(is_committed());
ut_ad(!is_dummy);
ut_ad(!parser);
ut_ad(!index_fts_syncing);
ut_ad(!online_log);
ut_ad(!rtr_track);
const size_t size= sizeof *this + n_fields * sizeof(*fields) +
#ifdef BTR_CUR_ADAPT
sizeof *search_info +
#endif
1 + strlen(name) +
n_uniq * (sizeof *stat_n_diff_key_vals +
sizeof *stat_n_sample_sizes +
sizeof *stat_n_non_null_key_vals);
mem_heap_t* heap= mem_heap_create(size);
dict_index_t *index= static_cast<dict_index_t*>(mem_heap_dup(heap, this,
sizeof *this));
*index= *this;
rw_lock_create(index_tree_rw_lock_key, &index->lock, SYNC_INDEX_TREE);
index->heap= heap;
index->name= mem_heap_strdup(heap, name);
index->fields= static_cast<dict_field_t*>
(mem_heap_dup(heap, fields, n_fields * sizeof *fields));
#ifdef BTR_CUR_ADAPT
index->search_info= btr_search_info_create(index->heap);
#endif /* BTR_CUR_ADAPT */
index->stat_n_diff_key_vals= static_cast<ib_uint64_t*>
(mem_heap_zalloc(heap, n_uniq * sizeof *stat_n_diff_key_vals));
index->stat_n_sample_sizes= static_cast<ib_uint64_t*>
(mem_heap_zalloc(heap, n_uniq * sizeof *stat_n_sample_sizes));
index->stat_n_non_null_key_vals= static_cast<ib_uint64_t*>
(mem_heap_zalloc(heap, n_uniq * sizeof *stat_n_non_null_key_vals));
memset(&index->zip_pad, 0, sizeof index->zip_pad);
return index;
}
/** Clone this index for lazy dropping of the adaptive hash.
@return this or a clone */
dict_index_t *dict_index_t::clone_if_needed()
{
if (!search_info->ref_count)
return this;
dict_index_t *prev= UT_LIST_GET_PREV(indexes, this);
UT_LIST_REMOVE(table->indexes, this);
UT_LIST_ADD_LAST(table->freed_indexes, this);
dict_index_t *index= clone();
set_freed();
if (prev)
UT_LIST_INSERT_AFTER(table->indexes, prev, index);
else
UT_LIST_ADD_FIRST(table->indexes, index);
return index;
}
#endif /* BTR_CUR_HASH_ADAPT */
/**********************************************************************//** /**********************************************************************//**
Make room in the table cache by evicting an unused table. The unused table Make room in the table cache by evicting an unused table. The unused table
should not be part of FK relationship and currently not used in any user should not be part of FK relationship and currently not used in any user
@@ -2081,6 +2133,14 @@ dict_table_remove_from_cache_low(
UT_DELETE(table->vc_templ); UT_DELETE(table->vc_templ);
} }
#ifdef BTR_CUR_HASH_ADAPT
if (UNIV_UNLIKELY(UT_LIST_GET_LEN(table->freed_indexes) != 0)) {
table->vc_templ = NULL;
table->id = 0;
return;
}
#endif /* BTR_CUR_HASH_ADAPT */
dict_mem_table_free(table); dict_mem_table_free(table);
} }
@@ -2310,6 +2370,8 @@ dict_index_remove_from_cache_low(
ut_ad(table->magic_n == DICT_TABLE_MAGIC_N); ut_ad(table->magic_n == DICT_TABLE_MAGIC_N);
ut_ad(index->magic_n == DICT_INDEX_MAGIC_N); ut_ad(index->magic_n == DICT_INDEX_MAGIC_N);
ut_ad(mutex_own(&dict_sys->mutex)); ut_ad(mutex_own(&dict_sys->mutex));
ut_ad(table->id);
ut_ad(!index->freed());
/* No need to acquire the dict_index_t::lock here because /* No need to acquire the dict_index_t::lock here because
there can't be any active operations on this index (or table). */ there can't be any active operations on this index (or table). */
@@ -2319,13 +2381,22 @@ dict_index_remove_from_cache_low(
row_log_free(index->online_log); row_log_free(index->online_log);
} }
/* Remove the index from the list of indexes of the table */
UT_LIST_REMOVE(table->indexes, index);
/* The index is being dropped, remove any compression stats for it. */
if (!lru_evict && DICT_TF_GET_ZIP_SSIZE(index->table->flags)) {
mutex_enter(&page_zip_stat_per_index_mutex);
page_zip_stat_per_index.erase(index->id);
mutex_exit(&page_zip_stat_per_index_mutex);
}
/* Remove the index from affected virtual column index list */
index->detach_columns();
#ifdef BTR_CUR_HASH_ADAPT #ifdef BTR_CUR_HASH_ADAPT
/* We always create search info whether or not adaptive /* We always create search info whether or not adaptive
hash index is enabled or not. */ hash index is enabled or not. */
btr_search_t* info = btr_search_get_info(index);
ulint retries = 0;
ut_ad(info);
/* We are not allowed to free the in-memory index struct /* We are not allowed to free the in-memory index struct
dict_index_t until all entries in the adaptive hash index dict_index_t until all entries in the adaptive hash index
that point to any of the page belonging to his b-tree index that point to any of the page belonging to his b-tree index
@@ -2335,31 +2406,15 @@ dict_index_remove_from_cache_low(
only free the dict_index_t struct when this count drops to only free the dict_index_t struct when this count drops to
zero. See also: dict_table_can_be_evicted() */ zero. See also: dict_table_can_be_evicted() */
do { if (index->n_ahi_pages()) {
if (!btr_search_info_get_ref_count(info, index) index->set_freed();
|| !buf_LRU_drop_page_hash_for_tablespace(table)) { UT_LIST_ADD_LAST(table->freed_indexes, index);
break; return;
} }
ut_a(++retries < 10000);
} while (srv_shutdown_state == SRV_SHUTDOWN_NONE || !lru_evict);
#endif /* BTR_CUR_HASH_ADAPT */ #endif /* BTR_CUR_HASH_ADAPT */
rw_lock_free(&index->lock); rw_lock_free(&index->lock);
/* The index is being dropped, remove any compression stats for it. */
if (!lru_evict && DICT_TF_GET_ZIP_SSIZE(index->table->flags)) {
mutex_enter(&page_zip_stat_per_index_mutex);
page_zip_stat_per_index.erase(index->id);
mutex_exit(&page_zip_stat_per_index_mutex);
}
/* Remove the index from the list of indexes of the table */
UT_LIST_REMOVE(table->indexes, index);
/* Remove the index from affected virtual column index list */
index->detach_columns();
dict_mem_index_free(index); dict_mem_index_free(index);
} }
@@ -5698,7 +5753,7 @@ dict_set_corrupted(
btr_cur_search_to_nth_level(sys_index, 0, tuple, PAGE_CUR_LE, btr_cur_search_to_nth_level(sys_index, 0, tuple, PAGE_CUR_LE,
BTR_MODIFY_LEAF, BTR_MODIFY_LEAF,
&cursor, 0, __FILE__, __LINE__, &mtr); &cursor, 0, __FILE__, __LINE__, &mtr, 0);
if (cursor.low_match == dtuple_get_n_fields(tuple)) { if (cursor.low_match == dtuple_get_n_fields(tuple)) {
/* UPDATE SYS_INDEXES SET TYPE=index->type /* UPDATE SYS_INDEXES SET TYPE=index->type
@@ -5800,7 +5855,7 @@ dict_index_set_merge_threshold(
btr_cur_search_to_nth_level(sys_index, 0, tuple, PAGE_CUR_GE, btr_cur_search_to_nth_level(sys_index, 0, tuple, PAGE_CUR_GE,
BTR_MODIFY_LEAF, BTR_MODIFY_LEAF,
&cursor, 0, __FILE__, __LINE__, &mtr); &cursor, 0, __FILE__, __LINE__, &mtr, 0);
if (cursor.up_match == dtuple_get_n_fields(tuple) if (cursor.up_match == dtuple_get_n_fields(tuple)
&& rec_get_n_fields_old(btr_cur_get_rec(&cursor)) && rec_get_n_fields_old(btr_cur_get_rec(&cursor))

View File

@@ -126,6 +126,9 @@ dict_mem_table_create(
lock_table_lock_list_init(&table->locks); lock_table_lock_list_init(&table->locks);
UT_LIST_INIT(table->indexes, &dict_index_t::indexes); UT_LIST_INIT(table->indexes, &dict_index_t::indexes);
#ifdef BTR_CUR_HASH_ADAPT
UT_LIST_INIT(table->freed_indexes, &dict_index_t::indexes);
#endif /* BTR_CUR_HASH_ADAPT */
table->heap = heap; table->heap = heap;
@@ -181,6 +184,10 @@ dict_mem_table_free(
{ {
ut_ad(table); ut_ad(table);
ut_ad(table->magic_n == DICT_TABLE_MAGIC_N); ut_ad(table->magic_n == DICT_TABLE_MAGIC_N);
ut_ad(UT_LIST_GET_LEN(table->indexes) == 0);
#ifdef BTR_CUR_HASH_ADAPT
ut_ad(UT_LIST_GET_LEN(table->freed_indexes) == 0);
#endif /* BTR_CUR_HASH_ADAPT */
ut_d(table->cached = FALSE); ut_d(table->cached = FALSE);
if (dict_table_has_fts_index(table) if (dict_table_has_fts_index(table)

View File

@@ -424,6 +424,9 @@ dict_stats_table_clone_create(
dict_table_stats_latch_create(t, false); dict_table_stats_latch_create(t, false);
UT_LIST_INIT(t->indexes, &dict_index_t::indexes); UT_LIST_INIT(t->indexes, &dict_index_t::indexes);
#ifdef BTR_CUR_HASH_ADAPT
UT_LIST_INIT(t->freed_indexes, &dict_index_t::indexes);
#endif /* BTR_CUR_HASH_ADAPT */
for (index = dict_table_get_first_index(table); for (index = dict_table_get_first_index(table);
index != NULL; index != NULL;
@@ -4005,6 +4008,9 @@ test_dict_stats_save()
table.stat_clustered_index_size = TEST_CLUSTERED_INDEX_SIZE; table.stat_clustered_index_size = TEST_CLUSTERED_INDEX_SIZE;
table.stat_sum_of_other_index_sizes = TEST_SUM_OF_OTHER_INDEX_SIZES; table.stat_sum_of_other_index_sizes = TEST_SUM_OF_OTHER_INDEX_SIZES;
UT_LIST_INIT(table.indexes, &dict_index_t::indexes); UT_LIST_INIT(table.indexes, &dict_index_t::indexes);
#ifdef BTR_CUR_HASH_ADAPT
UT_LIST_INIT(table.freed_indexes, &dict_index_t::indexes);
#endif /* BTR_CUR_HASH_ADAPT */
UT_LIST_ADD_LAST(table.indexes, &index1); UT_LIST_ADD_LAST(table.indexes, &index1);
UT_LIST_ADD_LAST(table.indexes, &index2); UT_LIST_ADD_LAST(table.indexes, &index2);
ut_d(table.magic_n = DICT_TABLE_MAGIC_N); ut_d(table.magic_n = DICT_TABLE_MAGIC_N);
@@ -4154,6 +4160,9 @@ test_dict_stats_fetch_from_ps()
/* craft a dummy dict_table_t */ /* craft a dummy dict_table_t */
table.name.m_name = (char*) (TEST_DATABASE_NAME "/" TEST_TABLE_NAME); table.name.m_name = (char*) (TEST_DATABASE_NAME "/" TEST_TABLE_NAME);
UT_LIST_INIT(table.indexes, &dict_index_t::indexes); UT_LIST_INIT(table.indexes, &dict_index_t::indexes);
#ifdef BTR_CUR_HASH_ADAPT
UT_LIST_INIT(table.freed_indexes, &dict_index_t::indexes);
#endif /* BTR_CUR_HASH_ADAPT */
UT_LIST_ADD_LAST(table.indexes, &index1); UT_LIST_ADD_LAST(table.indexes, &index1);
UT_LIST_ADD_LAST(table.indexes, &index2); UT_LIST_ADD_LAST(table.indexes, &index2);
ut_d(table.magic_n = DICT_TABLE_MAGIC_N); ut_d(table.magic_n = DICT_TABLE_MAGIC_N);

View File

@@ -2976,8 +2976,6 @@ fseg_mark_page_used(
@param[in,out] space tablespace @param[in,out] space tablespace
@param[in] offset page number @param[in] offset page number
@param[in] page_size page size @param[in] page_size page size
@param[in] ahi whether we may need to drop the adaptive
hash index
@param[in,out] mtr mini-transaction */ @param[in,out] mtr mini-transaction */
static static
void void
@@ -2986,9 +2984,6 @@ fseg_free_page_low(
fil_space_t* space, fil_space_t* space,
page_no_t offset, page_no_t offset,
const page_size_t& page_size, const page_size_t& page_size,
#ifdef BTR_CUR_HASH_ADAPT
bool ahi,
#endif /* BTR_CUR_HASH_ADAPT */
mtr_t* mtr) mtr_t* mtr)
{ {
xdes_t* descr; xdes_t* descr;
@@ -3003,15 +2998,6 @@ fseg_free_page_low(
== FSEG_MAGIC_N_VALUE); == FSEG_MAGIC_N_VALUE);
ut_ad(!((page_offset(seg_inode) - FSEG_ARR_OFFSET) % FSEG_INODE_SIZE)); ut_ad(!((page_offset(seg_inode) - FSEG_ARR_OFFSET) % FSEG_INODE_SIZE));
ut_d(space->modify_check(*mtr)); ut_d(space->modify_check(*mtr));
#ifdef BTR_CUR_HASH_ADAPT
/* Drop search system page hash index if the page is found in
the pool and is hashed */
if (ahi) {
btr_search_drop_page_hash_when_freed(
page_id_t(space->id, offset));
}
#endif /* BTR_CUR_HASH_ADAPT */
descr = xdes_get_descriptor(space, offset, page_size, mtr); descr = xdes_get_descriptor(space, offset, page_size, mtr);
@@ -3097,22 +3083,13 @@ fseg_free_page_low(
} }
} }
#ifndef BTR_CUR_HASH_ADAPT
# define fseg_free_page_low(inode, space, offset, page_size, ahi, mtr) \
fseg_free_page_low(inode, space, offset, page_size, mtr)
#endif /* !BTR_CUR_HASH_ADAPT */
/**********************************************************************//** /**********************************************************************//**
Frees a single page of a segment. */ Frees a single page of a segment. */
void void
fseg_free_page_func( fseg_free_page(
fseg_header_t* seg_header, /*!< in: segment header */ fseg_header_t* seg_header, /*!< in: segment header */
ulint space_id,/*!< in: space id */ ulint space_id,/*!< in: space id */
ulint page, /*!< in: page offset */ ulint page, /*!< in: page offset */
#ifdef BTR_CUR_HASH_ADAPT
bool ahi, /*!< in: whether we may need to drop
the adaptive hash index */
#endif /* BTR_CUR_HASH_ADAPT */
mtr_t* mtr) /*!< in/out: mini-transaction */ mtr_t* mtr) /*!< in/out: mini-transaction */
{ {
DBUG_ENTER("fseg_free_page"); DBUG_ENTER("fseg_free_page");
@@ -3128,7 +3105,7 @@ fseg_free_page_func(
&iblock); &iblock);
fil_block_check_type(*iblock, FIL_PAGE_INODE, mtr); fil_block_check_type(*iblock, FIL_PAGE_INODE, mtr);
fseg_free_page_low(seg_inode, space, page, page_size, ahi, mtr); fseg_free_page_low(seg_inode, space, page, page_size, mtr);
ut_d(buf_page_set_file_page_was_freed(page_id_t(space_id, page))); ut_d(buf_page_set_file_page_was_freed(page_id_t(space_id, page)));
@@ -3169,8 +3146,6 @@ fseg_page_is_free(fil_space_t* space, unsigned page)
@param[in,out] space tablespace @param[in,out] space tablespace
@param[in] page_size page size @param[in] page_size page size
@param[in] page page number in the extent @param[in] page page number in the extent
@param[in] ahi whether we may need to drop
the adaptive hash index
@param[in,out] mtr mini-transaction */ @param[in,out] mtr mini-transaction */
MY_ATTRIBUTE((nonnull)) MY_ATTRIBUTE((nonnull))
static static
@@ -3180,9 +3155,6 @@ fseg_free_extent(
fil_space_t* space, fil_space_t* space,
const page_size_t& page_size, const page_size_t& page_size,
ulint page, ulint page,
#ifdef BTR_CUR_HASH_ADAPT
bool ahi,
#endif /* BTR_CUR_HASH_ADAPT */
mtr_t* mtr) mtr_t* mtr)
{ {
xdes_t* descr; xdes_t* descr;
@@ -3203,23 +3175,6 @@ fseg_free_extent(
const ulint first_page_in_extent = page - (page % FSP_EXTENT_SIZE); const ulint first_page_in_extent = page - (page % FSP_EXTENT_SIZE);
#endif /* BTR_CUR_HASH_ADAPT || UNIV_DEBUG */ #endif /* BTR_CUR_HASH_ADAPT || UNIV_DEBUG */
#ifdef BTR_CUR_HASH_ADAPT
if (ahi) {
for (ulint i = 0; i < FSP_EXTENT_SIZE; i++) {
if (!xdes_mtr_get_bit(descr, XDES_FREE_BIT, i, mtr)) {
/* Drop search system page hash index
if the page is found in the pool and
is hashed */
btr_search_drop_page_hash_when_freed(
page_id_t(space->id,
first_page_in_extent + i));
}
}
}
#endif /* BTR_CUR_HASH_ADAPT */
if (xdes_is_full(descr, mtr)) { if (xdes_is_full(descr, mtr)) {
flst_remove(seg_inode + FSEG_FULL, flst_remove(seg_inode + FSEG_FULL,
descr + XDES_FLST_NODE, mtr); descr + XDES_FLST_NODE, mtr);
@@ -3251,27 +3206,18 @@ fseg_free_extent(
#endif /* UNIV_DEBUG */ #endif /* UNIV_DEBUG */
} }
#ifndef BTR_CUR_HASH_ADAPT
# define fseg_free_extent(inode, space, page_size, page, ahi, mtr) \
fseg_free_extent(inode, space, page_size, page, mtr)
#endif /* !BTR_CUR_HASH_ADAPT */
/**********************************************************************//** /**********************************************************************//**
Frees part of a segment. This function can be used to free a segment by Frees part of a segment. This function can be used to free a segment by
repeatedly calling this function in different mini-transactions. Doing repeatedly calling this function in different mini-transactions. Doing
the freeing in a single mini-transaction might result in too big a the freeing in a single mini-transaction might result in too big a
mini-transaction. mini-transaction.
@return TRUE if freeing completed */ @return whether the freeing was completed */
ibool bool
fseg_free_step_func( fseg_free_step(
fseg_header_t* header, /*!< in, own: segment header; NOTE: if the header fseg_header_t* header, /*!< in, own: segment header; NOTE: if the header
resides on the first page of the frag list resides on the first page of the frag list
of the segment, this pointer becomes obsolete of the segment, this pointer becomes obsolete
after the last freeing step */ after the last freeing step */
#ifdef BTR_CUR_HASH_ADAPT
bool ahi, /*!< in: whether we may need to drop
the adaptive hash index */
#endif /* BTR_CUR_HASH_ADAPT */
mtr_t* mtr) /*!< in/out: mini-transaction */ mtr_t* mtr) /*!< in/out: mini-transaction */
{ {
ulint n; ulint n;
@@ -3303,7 +3249,7 @@ fseg_free_step_func(
if (inode == NULL) { if (inode == NULL) {
ib::info() << "Double free of inode from " ib::info() << "Double free of inode from "
<< page_id_t(space_id, header_page); << page_id_t(space_id, header_page);
DBUG_RETURN(TRUE); DBUG_RETURN(true);
} }
fil_block_check_type(*iblock, FIL_PAGE_INODE, mtr); fil_block_check_type(*iblock, FIL_PAGE_INODE, mtr);
@@ -3313,9 +3259,9 @@ fseg_free_step_func(
/* Free the extent held by the segment */ /* Free the extent held by the segment */
page = xdes_get_offset(descr); page = xdes_get_offset(descr);
fseg_free_extent(inode, space, page_size, page, ahi, mtr); fseg_free_extent(inode, space, page_size, page, mtr);
DBUG_RETURN(FALSE); DBUG_RETURN(false);
} }
/* Free a frag page */ /* Free a frag page */
@@ -3325,13 +3271,13 @@ fseg_free_step_func(
/* Freeing completed: free the segment inode */ /* Freeing completed: free the segment inode */
fsp_free_seg_inode(space, page_size, inode, mtr); fsp_free_seg_inode(space, page_size, inode, mtr);
DBUG_RETURN(TRUE); DBUG_RETURN(true);
} }
fseg_free_page_low( fseg_free_page_low(
inode, space, inode, space,
fseg_get_nth_frag_page_no(inode, n, mtr), fseg_get_nth_frag_page_no(inode, n, mtr),
page_size, ahi, mtr); page_size, mtr);
n = fseg_find_last_used_frag_page_slot(inode, mtr); n = fseg_find_last_used_frag_page_slot(inode, mtr);
@@ -3339,24 +3285,20 @@ fseg_free_step_func(
/* Freeing completed: free the segment inode */ /* Freeing completed: free the segment inode */
fsp_free_seg_inode(space, page_size, inode, mtr); fsp_free_seg_inode(space, page_size, inode, mtr);
DBUG_RETURN(TRUE); DBUG_RETURN(true);
} }
DBUG_RETURN(FALSE); DBUG_RETURN(false);
} }
/**********************************************************************//** /**********************************************************************//**
Frees part of a segment. Differs from fseg_free_step because this function Frees part of a segment. Differs from fseg_free_step because this function
leaves the header page unfreed. leaves the header page unfreed.
@return TRUE if freeing completed, except the header page */ @return whether the freeing was completed, except for the header page */
ibool bool
fseg_free_step_not_header_func( fseg_free_step_not_header(
fseg_header_t* header, /*!< in: segment header which must reside on fseg_header_t* header, /*!< in: segment header which must reside on
the first fragment page of the segment */ the first fragment page of the segment */
#ifdef BTR_CUR_HASH_ADAPT
bool ahi, /*!< in: whether we may need to drop
the adaptive hash index */
#endif /* BTR_CUR_HASH_ADAPT */
mtr_t* mtr) /*!< in/out: mini-transaction */ mtr_t* mtr) /*!< in/out: mini-transaction */
{ {
ulint n; ulint n;
@@ -3382,29 +3324,27 @@ fseg_free_step_not_header_func(
/* Free the extent held by the segment */ /* Free the extent held by the segment */
page = xdes_get_offset(descr); page = xdes_get_offset(descr);
fseg_free_extent(inode, space, page_size, page, ahi, mtr); fseg_free_extent(inode, space, page_size, page, mtr);
return(FALSE); return false;
} }
/* Free a frag page */ /* Free a frag page */
n = fseg_find_last_used_frag_page_slot(inode, mtr); n = fseg_find_last_used_frag_page_slot(inode, mtr);
if (n == ULINT_UNDEFINED) { ut_a(n != ULINT_UNDEFINED);
ut_error;
}
page_no = fseg_get_nth_frag_page_no(inode, n, mtr); page_no = fseg_get_nth_frag_page_no(inode, n, mtr);
if (page_no == page_get_page_no(page_align(header))) { if (page_no == page_get_page_no(page_align(header))) {
return(TRUE); return true;
} }
fseg_free_page_low(inode, space, page_no, page_size, ahi, mtr); fseg_free_page_low(inode, space, page_no, page_size, mtr);
return(FALSE); return false;
} }
/** Returns the first extent descriptor for a segment. /** Returns the first extent descriptor for a segment.

View File

@@ -590,7 +590,7 @@ rtr_pcur_open_low(
} }
btr_cur_search_to_nth_level(index, level, tuple, mode, latch_mode, btr_cur_search_to_nth_level(index, level, tuple, mode, latch_mode,
btr_cursor, 0, file, line, mtr); btr_cursor, 0, file, line, mtr, 0);
cursor->pos_state = BTR_PCUR_IS_POSITIONED; cursor->pos_state = BTR_PCUR_IS_POSITIONED;
cursor->trx_if_known = NULL; cursor->trx_if_known = NULL;
@@ -759,7 +759,7 @@ static void rtr_get_father_node(
btr_cur_search_to_nth_level( btr_cur_search_to_nth_level(
index, level, tuple, PAGE_CUR_RTREE_LOCATE, index, level, tuple, PAGE_CUR_RTREE_LOCATE,
BTR_CONT_MODIFY_TREE, btr_cur, 0, BTR_CONT_MODIFY_TREE, btr_cur, 0,
__FILE__, __LINE__, mtr); __FILE__, __LINE__, mtr, 0);
} else { } else {
/* btr_validate */ /* btr_validate */
@@ -769,7 +769,7 @@ static void rtr_get_father_node(
btr_cur_search_to_nth_level( btr_cur_search_to_nth_level(
index, level, tuple, PAGE_CUR_RTREE_LOCATE, index, level, tuple, PAGE_CUR_RTREE_LOCATE,
BTR_CONT_MODIFY_TREE, btr_cur, 0, BTR_CONT_MODIFY_TREE, btr_cur, 0,
__FILE__, __LINE__, mtr); __FILE__, __LINE__, mtr, 0);
rec = btr_cur_get_rec(btr_cur); rec = btr_cur_get_rec(btr_cur);
n_fields = dtuple_get_n_fields_cmp(tuple); n_fields = dtuple_get_n_fields_cmp(tuple);

View File

@@ -254,6 +254,7 @@ struct ha_innobase_inplace_ctx : public inplace_alter_handler_ctx
for (ulint i = 0; i < num_to_add_index; i++) { for (ulint i = 0; i < num_to_add_index; i++) {
if (!add_index[i]->is_committed()) { if (!add_index[i]->is_committed()) {
add_index[i]->detach_columns(); add_index[i]->detach_columns();
add_index[i]->n_fields = 0;
} }
} }
} }
@@ -8689,21 +8690,14 @@ foreign_fail:
} }
if (ctx0->num_to_drop_vcol || ctx0->num_to_add_vcol) { if (ctx0->num_to_drop_vcol || ctx0->num_to_add_vcol) {
/* FIXME: this workaround does not seem to work with
partitioned tables */
DBUG_ASSERT(ctx0->old_table->get_ref_count() == 1); DBUG_ASSERT(ctx0->old_table->get_ref_count() == 1);
trx_commit_for_mysql(m_prebuilt->trx); trx_commit_for_mysql(m_prebuilt->trx);
#ifdef BTR_CUR_HASH_ADAPT
if (btr_search_enabled) {
btr_search_disable(false);
btr_search_enable();
}
#endif /* BTR_CUR_HASH_ADAPT */
char tb_name[FN_REFLEN];
ut_strcpy(tb_name, m_prebuilt->table->name.m_name);
tb_name[strlen(m_prebuilt->table->name.m_name)] = 0;
char tb_name[NAME_LEN * 2 + 1 + 1];
strcpy(tb_name, m_prebuilt->table->name.m_name);
dict_table_close(m_prebuilt->table, true, false); dict_table_close(m_prebuilt->table, true, false);
dict_table_remove_from_cache(m_prebuilt->table); dict_table_remove_from_cache(m_prebuilt->table);
m_prebuilt->table = dict_table_open_on_name( m_prebuilt->table = dict_table_open_on_name(

View File

@@ -2109,7 +2109,7 @@ ibuf_remove_free_page(void)
page from it. */ page from it. */
fseg_free_page(header_page + IBUF_HEADER + IBUF_TREE_SEG_HEADER, fseg_free_page(header_page + IBUF_HEADER + IBUF_TREE_SEG_HEADER,
IBUF_SPACE_ID, page_no, false, &mtr); IBUF_SPACE_ID, page_no, &mtr);
const page_id_t page_id(IBUF_SPACE_ID, page_no); const page_id_t page_id(IBUF_SPACE_ID, page_no);

View File

@@ -154,8 +154,7 @@ Note that if mode is PAGE_CUR_LE, which is used in inserts, then
cursor->up_match and cursor->low_match both will have sensible values. cursor->up_match and cursor->low_match both will have sensible values.
If mode is PAGE_CUR_GE, then up_match will a have a sensible value. */ If mode is PAGE_CUR_GE, then up_match will a have a sensible value. */
dberr_t dberr_t
btr_cur_search_to_nth_level( btr_cur_search_to_nth_level_func(
/*========================*/
dict_index_t* index, /*!< in: index */ dict_index_t* index, /*!< in: index */
ulint level, /*!< in: the tree level of search */ ulint level, /*!< in: the tree level of search */
const dtuple_t* tuple, /*!< in: data tuple; NOTE: n_fields_cmp in const dtuple_t* tuple, /*!< in: data tuple; NOTE: n_fields_cmp in
@@ -181,16 +180,24 @@ btr_cur_search_to_nth_level(
to protect the record! */ to protect the record! */
btr_cur_t* cursor, /*!< in/out: tree cursor; the cursor page is btr_cur_t* cursor, /*!< in/out: tree cursor; the cursor page is
s- or x-latched, but see also above! */ s- or x-latched, but see also above! */
#ifdef BTR_CUR_HASH_ADAPT
ulint has_search_latch, ulint has_search_latch,
/*!< in: latch mode the caller /*!< in: latch mode the caller
currently has on search system: currently has on search system:
RW_S_LATCH, or 0 */ RW_S_LATCH, or 0 */
#endif /* BTR_CUR_HASH_ADAPT */
const char* file, /*!< in: file name */ const char* file, /*!< in: file name */
unsigned line, /*!< in: line where called */ unsigned line, /*!< in: line where called */
mtr_t* mtr, /*!< in/out: mini-transaction */ mtr_t* mtr, /*!< in/out: mini-transaction */
ib_uint64_t autoinc = 0); ib_uint64_t autoinc);
/*!< in: PAGE_ROOT_AUTO_INC to be written /*!< in: PAGE_ROOT_AUTO_INC to be written
(0 if none) */ (0 if none) */
#ifdef BTR_CUR_HASH_ADAPT
# define btr_cur_search_to_nth_level btr_cur_search_to_nth_level_func
#else /* BTR_CUR_HASH_ADAPT */
# define btr_cur_search_to_nth_level(ix,lv,t,md,l,cur,has,file,line,m,ai) \
btr_cur_search_to_nth_level_func(ix,lv,t,md,l,cur,file,line,m,ai)
#endif /* BTR_CUR_HASH_ADAPT */
/*****************************************************************//** /*****************************************************************//**
Opens a cursor at either end of an index. Opens a cursor at either end of an index.

View File

@@ -1,7 +1,7 @@
/***************************************************************************** /*****************************************************************************
Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2017, 2019, MariaDB Corporation. Copyright (c) 2017, 2020, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software the terms of the GNU General Public License as published by the Free Software
@@ -136,15 +136,22 @@ btr_pcur_open_with_no_init_func(
page, but assume that the caller uses his page, but assume that the caller uses his
btr search latch to protect the record! */ btr search latch to protect the record! */
btr_pcur_t* cursor, /*!< in: memory buffer for persistent cursor */ btr_pcur_t* cursor, /*!< in: memory buffer for persistent cursor */
#ifdef BTR_CUR_HASH_ADAPT
ulint has_search_latch, ulint has_search_latch,
/*!< in: latch mode the caller /*!< in: latch mode the caller
currently has on search system: currently has on search system:
RW_S_LATCH, or 0 */ RW_S_LATCH, or 0 */
#endif /* BTR_CUR_HASH_ADAPT */
const char* file, /*!< in: file name */ const char* file, /*!< in: file name */
unsigned line, /*!< in: line where called */ unsigned line, /*!< in: line where called */
mtr_t* mtr); /*!< in: mtr */ mtr_t* mtr); /*!< in: mtr */
#define btr_pcur_open_with_no_init(ix,t,md,l,cur,has,m) \ #ifdef BTR_CUR_HASH_ADAPT
# define btr_pcur_open_with_no_init(ix,t,md,l,cur,has,m) \
btr_pcur_open_with_no_init_func(ix,t,md,l,cur,has,__FILE__,__LINE__,m) btr_pcur_open_with_no_init_func(ix,t,md,l,cur,has,__FILE__,__LINE__,m)
#else /* BTR_CUR_HASH_ADAPT */
# define btr_pcur_open_with_no_init(ix,t,md,l,cur,has,m) \
btr_pcur_open_with_no_init_func(ix,t,md,l,cur,__FILE__,__LINE__,m)
#endif /* BTR_CUR_HASH_ADAPT */
/*****************************************************************//** /*****************************************************************//**
Opens a persistent cursor at either end of an index. */ Opens a persistent cursor at either end of an index. */

View File

@@ -1,7 +1,7 @@
/***************************************************************************** /*****************************************************************************
Copyright (c) 1996, 2015, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 1996, 2015, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2015, 2019, MariaDB Corporation. Copyright (c) 2015, 2020, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software the terms of the GNU General Public License as published by the Free Software
@@ -496,10 +496,12 @@ btr_pcur_open_with_no_init_func(
page, but assume that the caller uses his page, but assume that the caller uses his
btr search latch to protect the record! */ btr search latch to protect the record! */
btr_pcur_t* cursor, /*!< in: memory buffer for persistent cursor */ btr_pcur_t* cursor, /*!< in: memory buffer for persistent cursor */
#ifdef BTR_CUR_HASH_ADAPT
ulint has_search_latch, ulint has_search_latch,
/*!< in: latch mode the caller /*!< in: latch mode the caller
currently has on search system: currently has on search system:
RW_S_LATCH, or 0 */ RW_S_LATCH, or 0 */
#endif /* BTR_CUR_HASH_ADAPT */
const char* file, /*!< in: file name */ const char* file, /*!< in: file name */
unsigned line, /*!< in: line where called */ unsigned line, /*!< in: line where called */
mtr_t* mtr) /*!< in: mtr */ mtr_t* mtr) /*!< in: mtr */
@@ -516,7 +518,7 @@ btr_pcur_open_with_no_init_func(
err = btr_cur_search_to_nth_level( err = btr_cur_search_to_nth_level(
index, 0, tuple, mode, latch_mode, btr_cursor, index, 0, tuple, mode, latch_mode, btr_cursor,
has_search_latch, file, line, mtr); has_search_latch, file, line, mtr, 0);
cursor->pos_state = BTR_PCUR_IS_POSITIONED; cursor->pos_state = BTR_PCUR_IS_POSITIONED;

View File

@@ -1,7 +1,7 @@
/***************************************************************************** /*****************************************************************************
Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2018, MariaDB Corporation. Copyright (c) 2018, 2020, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software the terms of the GNU General Public License as published by the Free Software
@@ -54,15 +54,6 @@ btr_search_disable(
void void
btr_search_enable(); btr_search_enable();
/** Returns the value of ref_count. The value is protected by latch.
@param[in] info search info
@param[in] index index identifier
@return ref_count value. */
ulint
btr_search_info_get_ref_count(
btr_search_t* info,
dict_index_t* index);
/*********************************************************************//** /*********************************************************************//**
Updates the search info. */ Updates the search info. */
UNIV_INLINE UNIV_INLINE
@@ -156,18 +147,6 @@ btr_search_update_hash_on_delete(btr_cur_t* cursor);
bool bool
btr_search_validate(); btr_search_validate();
/** X-Lock the search latch (corresponding to given index)
@param[in] index index handler */
UNIV_INLINE
void
btr_search_x_lock(const dict_index_t* index);
/** X-Unlock the search latch (corresponding to given index)
@param[in] index index handler */
UNIV_INLINE
void
btr_search_x_unlock(const dict_index_t* index);
/** Lock all search latches in exclusive mode. */ /** Lock all search latches in exclusive mode. */
UNIV_INLINE UNIV_INLINE
void void
@@ -178,18 +157,6 @@ UNIV_INLINE
void void
btr_search_x_unlock_all(); btr_search_x_unlock_all();
/** S-Lock the search latch (corresponding to given index)
@param[in] index index handler */
UNIV_INLINE
void
btr_search_s_lock(const dict_index_t* index);
/** S-Unlock the search latch (corresponding to given index)
@param[in] index index handler */
UNIV_INLINE
void
btr_search_s_unlock(const dict_index_t* index);
/** Lock all search latches in shared mode. */ /** Lock all search latches in shared mode. */
UNIV_INLINE UNIV_INLINE
void void
@@ -236,12 +203,8 @@ btr_get_search_table(const dict_index_t* index);
#else /* BTR_CUR_HASH_ADAPT */ #else /* BTR_CUR_HASH_ADAPT */
# define btr_search_sys_create(size) # define btr_search_sys_create(size)
# define btr_search_drop_page_hash_index(block) # define btr_search_drop_page_hash_index(block)
# define btr_search_s_lock(index)
# define btr_search_s_unlock(index)
# define btr_search_s_lock_all(index) # define btr_search_s_lock_all(index)
# define btr_search_s_unlock_all(index) # define btr_search_s_unlock_all(index)
# define btr_search_x_lock(index)
# define btr_search_x_unlock(index)
# define btr_search_info_update(index, cursor) # define btr_search_info_update(index, cursor)
# define btr_search_move_or_delete_hash_entries(new_block, block, index) # define btr_search_move_or_delete_hash_entries(new_block, block, index)
# define btr_search_update_hash_on_insert(cursor) # define btr_search_update_hash_on_insert(cursor)
@@ -327,6 +290,18 @@ struct btr_search_t{
}; };
#ifdef BTR_CUR_HASH_ADAPT #ifdef BTR_CUR_HASH_ADAPT
/** @return number of leaf pages pointed to by the adaptive hash index */
inline ulint dict_index_t::n_ahi_pages() const
{
if (!btr_search_enabled)
return 0;
rw_lock_t *latch = btr_get_search_latch(this);
rw_lock_s_lock(latch);
ulint ref_count= search_info->ref_count;
rw_lock_s_unlock(latch);
return ref_count;
}
/** The hash index system */ /** The hash index system */
struct btr_search_sys_t{ struct btr_search_sys_t{
hash_table_t** hash_tables; /*!< the adaptive hash tables, hash_table_t** hash_tables; /*!< the adaptive hash tables,

View File

@@ -1,6 +1,7 @@
/***************************************************************************** /*****************************************************************************
Copyright (c) 1996, 2015, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 1996, 2015, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2020, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software the terms of the GNU General Public License as published by the Free Software
@@ -62,8 +63,8 @@ btr_search_info_update(
dict_index_t* index, /*!< in: index of the cursor */ dict_index_t* index, /*!< in: index of the cursor */
btr_cur_t* cursor) /*!< in: cursor which was just positioned */ btr_cur_t* cursor) /*!< in: cursor which was just positioned */
{ {
ut_ad(!rw_lock_own(btr_get_search_latch(index), RW_LOCK_S)); ut_ad(!rw_lock_own_flagged(btr_get_search_latch(index),
ut_ad(!rw_lock_own(btr_get_search_latch(index), RW_LOCK_X)); RW_LOCK_FLAG_X | RW_LOCK_FLAG_S));
if (dict_index_is_spatial(index) || !btr_search_enabled) { if (dict_index_is_spatial(index) || !btr_search_enabled) {
return; return;
@@ -87,24 +88,6 @@ btr_search_info_update(
btr_search_info_update_slow(info, cursor); btr_search_info_update_slow(info, cursor);
} }
/** X-Lock the search latch (corresponding to given index)
@param[in] index index handler */
UNIV_INLINE
void
btr_search_x_lock(const dict_index_t* index)
{
rw_lock_x_lock(btr_get_search_latch(index));
}
/** X-Unlock the search latch (corresponding to given index)
@param[in] index index handler */
UNIV_INLINE
void
btr_search_x_unlock(const dict_index_t* index)
{
rw_lock_x_unlock(btr_get_search_latch(index));
}
/** Lock all search latches in exclusive mode. */ /** Lock all search latches in exclusive mode. */
UNIV_INLINE UNIV_INLINE
void void
@@ -125,24 +108,6 @@ btr_search_x_unlock_all()
} }
} }
/** S-Lock the search latch (corresponding to given index)
@param[in] index index handler */
UNIV_INLINE
void
btr_search_s_lock(const dict_index_t* index)
{
rw_lock_s_lock(btr_get_search_latch(index));
}
/** S-Unlock the search latch (corresponding to given index)
@param[in] index index handler */
UNIV_INLINE
void
btr_search_s_unlock(const dict_index_t* index)
{
rw_lock_s_unlock(btr_get_search_latch(index));
}
/** Lock all search latches in shared mode. */ /** Lock all search latches in shared mode. */
UNIV_INLINE UNIV_INLINE
void void

View File

@@ -71,11 +71,13 @@ struct fil_addr_t;
/* @} */ /* @} */
/** @name Modes for buf_page_get_known_nowait */ /** @name Modes for buf_page_get_known_nowait */
/* @{ */ /* @{ */
#define BUF_MAKE_YOUNG 51 /*!< Move the block to the #ifdef BTR_CUR_HASH_ADAPT
# define BUF_MAKE_YOUNG 51 /*!< Move the block to the
start of the LRU list if there start of the LRU list if there
is a danger that the block is a danger that the block
would drift out of the buffer would drift out of the buffer
pool*/ pool*/
#endif /* BTR_CUR_HASH_ADAPT */
#define BUF_KEEP_OLD 52 /*!< Preserve the current LRU #define BUF_KEEP_OLD 52 /*!< Preserve the current LRU
position of the block. */ position of the block. */
/* @} */ /* @} */
@@ -282,12 +284,6 @@ extern "C"
os_thread_ret_t os_thread_ret_t
DECLARE_THREAD(buf_resize_thread)(void*); DECLARE_THREAD(buf_resize_thread)(void*);
#ifdef BTR_CUR_HASH_ADAPT
/** Clear the adaptive hash index on all pages in the buffer pool. */
void
buf_pool_clear_hash_index();
#endif /* BTR_CUR_HASH_ADAPT */
/*********************************************************************//** /*********************************************************************//**
Gets the current size of buffer buf_pool in bytes. Gets the current size of buffer buf_pool in bytes.
@return size in bytes */ @return size in bytes */

View File

@@ -49,17 +49,6 @@ These are low-level functions
/** Minimum LRU list length for which the LRU_old pointer is defined */ /** Minimum LRU list length for which the LRU_old pointer is defined */
#define BUF_LRU_OLD_MIN_LEN 512 /* 8 megabytes of 16k pages */ #define BUF_LRU_OLD_MIN_LEN 512 /* 8 megabytes of 16k pages */
#ifdef BTR_CUR_HASH_ADAPT
struct dict_table_t;
/** Try to drop the adaptive hash index for a tablespace.
@param[in,out] table table
@return whether anything was dropped */
bool buf_LRU_drop_page_hash_for_tablespace(dict_table_t* table)
MY_ATTRIBUTE((warn_unused_result,nonnull));
#else
# define buf_LRU_drop_page_hash_for_tablespace(table)
#endif /* BTR_CUR_HASH_ADAPT */
/** Empty the flush list for all pages belonging to a tablespace. /** Empty the flush list for all pages belonging to a tablespace.
@param[in] id tablespace identifier @param[in] id tablespace identifier
@param[in,out] observer flush observer, @param[in,out] observer flush observer,

View File

@@ -831,6 +831,9 @@ struct dict_index_t{
dict_table_t* table; /*!< back pointer to table */ dict_table_t* table; /*!< back pointer to table */
unsigned space:32; unsigned space:32;
/*!< space where the index tree is placed */ /*!< space where the index tree is placed */
/** root page number, or FIL_NULL if the index has been detached
from storage (DISCARD TABLESPACE or similar),
or 1 if the index is in table->freed_indexes */
unsigned page:32;/*!< index tree root page number */ unsigned page:32;/*!< index tree root page number */
unsigned merge_threshold:6; unsigned merge_threshold:6;
/*!< In the pessimistic delete, if the page /*!< In the pessimistic delete, if the page
@@ -1021,10 +1024,22 @@ struct dict_index_t{
for (unsigned i = 0; i < n_fields; i++) { for (unsigned i = 0; i < n_fields; i++) {
fields[i].col->detach(*this); fields[i].col->detach(*this);
} }
}
}
n_fields = 0; #ifdef BTR_CUR_HASH_ADAPT
} /** @return a clone of this */
} dict_index_t* clone() const;
/** Clone this index for lazy dropping of the adaptive hash index.
@return this or a clone */
dict_index_t* clone_if_needed();
/** @return number of leaf pages pointed to by the adaptive hash index */
inline ulint n_ahi_pages() const;
/** @return whether mark_freed() had been invoked */
bool freed() const { return UNIV_UNLIKELY(page == 1); }
/** Note that the index is waiting for btr_search_lazy_free() */
void set_freed() { ut_ad(!freed()); page= 1; }
#endif /* BTR_CUR_HASH_ADAPT */
/** This ad-hoc class is used by record_size_info only. */ /** This ad-hoc class is used by record_size_info only. */
class record_size_info_t { class record_size_info_t {
@@ -1605,6 +1620,11 @@ struct dict_table_t {
/** List of indexes of the table. */ /** List of indexes of the table. */
UT_LIST_BASE_NODE_T(dict_index_t) indexes; UT_LIST_BASE_NODE_T(dict_index_t) indexes;
#ifdef BTR_CUR_HASH_ADAPT
/** List of detached indexes that are waiting to be freed along with
the last adaptive hash index entry */
UT_LIST_BASE_NODE_T(dict_index_t) freed_indexes;
#endif /* BTR_CUR_HASH_ADAPT */
/** List of foreign key constraints in the table. These refer to /** List of foreign key constraints in the table. These refer to
columns in other tables. */ columns in other tables. */

View File

@@ -1,7 +1,7 @@
/***************************************************************************** /*****************************************************************************
Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2013, 2019, MariaDB Corporation. Copyright (c) 2013, 2020, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software the terms of the GNU General Public License as published by the Free Software
@@ -568,22 +568,11 @@ fsp_get_available_space_in_free_extents(
/**********************************************************************//** /**********************************************************************//**
Frees a single page of a segment. */ Frees a single page of a segment. */
void void
fseg_free_page_func( fseg_free_page(
fseg_header_t* seg_header, /*!< in: segment header */ fseg_header_t* seg_header, /*!< in: segment header */
ulint space_id, /*!< in: space id */ ulint space_id, /*!< in: space id */
ulint page, /*!< in: page offset */ ulint page, /*!< in: page offset */
#ifdef BTR_CUR_HASH_ADAPT
bool ahi, /*!< in: whether we may need to drop
the adaptive hash index */
#endif /* BTR_CUR_HASH_ADAPT */
mtr_t* mtr); /*!< in/out: mini-transaction */ mtr_t* mtr); /*!< in/out: mini-transaction */
#ifdef BTR_CUR_HASH_ADAPT
# define fseg_free_page(header, space_id, page, ahi, mtr) \
fseg_free_page_func(header, space_id, page, ahi, mtr)
#else /* BTR_CUR_HASH_ADAPT */
# define fseg_free_page(header, space_id, page, ahi, mtr) \
fseg_free_page_func(header, space_id, page, mtr)
#endif /* BTR_CUR_HASH_ADAPT */
/** Determine whether a page is free. /** Determine whether a page is free.
@param[in,out] space tablespace @param[in,out] space tablespace
@param[in] page page number @param[in] page page number
@@ -596,45 +585,25 @@ Frees part of a segment. This function can be used to free a segment
by repeatedly calling this function in different mini-transactions. by repeatedly calling this function in different mini-transactions.
Doing the freeing in a single mini-transaction might result in Doing the freeing in a single mini-transaction might result in
too big a mini-transaction. too big a mini-transaction.
@return TRUE if freeing completed */ @return whether the freeing was completed */
ibool bool
fseg_free_step_func( fseg_free_step(
fseg_header_t* header, /*!< in, own: segment header; NOTE: if the header fseg_header_t* header, /*!< in, own: segment header; NOTE: if the header
resides on the first page of the frag list resides on the first page of the frag list
of the segment, this pointer becomes obsolete of the segment, this pointer becomes obsolete
after the last freeing step */ after the last freeing step */
#ifdef BTR_CUR_HASH_ADAPT
bool ahi, /*!< in: whether we may need to drop
the adaptive hash index */
#endif /* BTR_CUR_HASH_ADAPT */
mtr_t* mtr) /*!< in/out: mini-transaction */ mtr_t* mtr) /*!< in/out: mini-transaction */
MY_ATTRIBUTE((warn_unused_result)); MY_ATTRIBUTE((warn_unused_result));
#ifdef BTR_CUR_HASH_ADAPT
# define fseg_free_step(header, ahi, mtr) fseg_free_step_func(header, ahi, mtr)
#else /* BTR_CUR_HASH_ADAPT */
# define fseg_free_step(header, ahi, mtr) fseg_free_step_func(header, mtr)
#endif /* BTR_CUR_HASH_ADAPT */
/**********************************************************************//** /**********************************************************************//**
Frees part of a segment. Differs from fseg_free_step because this function Frees part of a segment. Differs from fseg_free_step because this function
leaves the header page unfreed. leaves the header page unfreed.
@return TRUE if freeing completed, except the header page */ @return whether the freeing was completed, except for the header page */
ibool bool
fseg_free_step_not_header_func( fseg_free_step_not_header(
fseg_header_t* header, /*!< in: segment header which must reside on fseg_header_t* header, /*!< in: segment header which must reside on
the first fragment page of the segment */ the first fragment page of the segment */
#ifdef BTR_CUR_HASH_ADAPT
bool ahi, /*!< in: whether we may need to drop
the adaptive hash index */
#endif /* BTR_CUR_HASH_ADAPT */
mtr_t* mtr) /*!< in/out: mini-transaction */ mtr_t* mtr) /*!< in/out: mini-transaction */
MY_ATTRIBUTE((warn_unused_result)); MY_ATTRIBUTE((warn_unused_result));
#ifdef BTR_CUR_HASH_ADAPT
# define fseg_free_step_not_header(header, ahi, mtr) \
fseg_free_step_not_header_func(header, ahi, mtr)
#else /* BTR_CUR_HASH_ADAPT */
# define fseg_free_step_not_header(header, ahi, mtr) \
fseg_free_step_not_header_func(header, mtr)
#endif /* BTR_CUR_HASH_ADAPT */
/** Reset the page type. /** Reset the page type.
Data files created before MySQL 5.1.48 may contain garbage in FIL_PAGE_TYPE. Data files created before MySQL 5.1.48 may contain garbage in FIL_PAGE_TYPE.

View File

@@ -26,6 +26,9 @@ Created 2012-02-08 by Sunny Bains.
#include "row0import.h" #include "row0import.h"
#include "btr0pcur.h" #include "btr0pcur.h"
#ifdef BTR_CUR_HASH_ADAPT
# include "btr0sea.h"
#endif
#include "que0que.h" #include "que0que.h"
#include "dict0boot.h" #include "dict0boot.h"
#include "dict0load.h" #include "dict0load.h"
@@ -4017,15 +4020,12 @@ row_import_for_mysql(
index entries that point to cached garbage pages in the buffer index entries that point to cached garbage pages in the buffer
pool, because PageConverter::operator() only evicted those pool, because PageConverter::operator() only evicted those
pages that were replaced by the imported pages. We must pages that were replaced by the imported pages. We must
discard all remaining adaptive hash index entries, because the detach any remaining adaptive hash index entries, because the
adaptive hash index must be a subset of the table contents; adaptive hash index must be a subset of the table contents;
false positives are not tolerated. */ false positives are not tolerated. */
while (buf_LRU_drop_page_hash_for_tablespace(table)) { for (dict_index_t* index = UT_LIST_GET_FIRST(table->indexes); index;
if (trx_is_interrupted(trx) index = UT_LIST_GET_NEXT(indexes, index)) {
|| srv_shutdown_state != SRV_SHUTDOWN_NONE) { index = index->clone_if_needed();
err = DB_INTERRUPTED;
break;
}
} }
#endif /* BTR_CUR_HASH_ADAPT */ #endif /* BTR_CUR_HASH_ADAPT */

View File

@@ -2907,7 +2907,7 @@ row_ins_sec_index_entry_low(
err = btr_cur_search_to_nth_level( err = btr_cur_search_to_nth_level(
index, 0, entry, PAGE_CUR_RTREE_INSERT, index, 0, entry, PAGE_CUR_RTREE_INSERT,
search_mode, search_mode,
&cursor, 0, __FILE__, __LINE__, &mtr); &cursor, 0, __FILE__, __LINE__, &mtr, 0);
if (mode == BTR_MODIFY_LEAF && rtr_info.mbr_adj) { if (mode == BTR_MODIFY_LEAF && rtr_info.mbr_adj) {
mtr_commit(&mtr); mtr_commit(&mtr);
@@ -2922,7 +2922,7 @@ row_ins_sec_index_entry_low(
err = btr_cur_search_to_nth_level( err = btr_cur_search_to_nth_level(
index, 0, entry, PAGE_CUR_RTREE_INSERT, index, 0, entry, PAGE_CUR_RTREE_INSERT,
search_mode, search_mode,
&cursor, 0, __FILE__, __LINE__, &mtr); &cursor, 0, __FILE__, __LINE__, &mtr, 0);
mode = BTR_MODIFY_TREE; mode = BTR_MODIFY_TREE;
} }
@@ -2934,7 +2934,7 @@ row_ins_sec_index_entry_low(
err = btr_cur_search_to_nth_level( err = btr_cur_search_to_nth_level(
index, 0, entry, PAGE_CUR_LE, index, 0, entry, PAGE_CUR_LE,
search_mode, search_mode,
&cursor, 0, __FILE__, __LINE__, &mtr); &cursor, 0, __FILE__, __LINE__, &mtr, 0);
} }
if (err != DB_SUCCESS) { if (err != DB_SUCCESS) {
@@ -3028,7 +3028,7 @@ row_ins_sec_index_entry_low(
index, 0, entry, PAGE_CUR_LE, index, 0, entry, PAGE_CUR_LE,
(search_mode (search_mode
& ~(BTR_INSERT | BTR_IGNORE_SEC_UNIQUE)), & ~(BTR_INSERT | BTR_IGNORE_SEC_UNIQUE)),
&cursor, 0, __FILE__, __LINE__, &mtr); &cursor, 0, __FILE__, __LINE__, &mtr, 0);
} }
if (row_ins_must_modify_rec(&cursor)) { if (row_ins_must_modify_rec(&cursor)) {

View File

@@ -3102,7 +3102,7 @@ row_log_apply_op_low(
? BTR_MODIFY_TREE ? BTR_MODIFY_TREE
: BTR_MODIFY_LEAF, : BTR_MODIFY_LEAF,
&cursor, 0, __FILE__, __LINE__, &cursor, 0, __FILE__, __LINE__,
&mtr); &mtr, 0);
ut_ad(dict_index_get_n_unique(index) > 0); ut_ad(dict_index_get_n_unique(index) > 0);
/* This test is somewhat similar to row_ins_must_modify_rec(), /* This test is somewhat similar to row_ins_must_modify_rec(),
@@ -3151,7 +3151,7 @@ row_log_apply_op_low(
btr_cur_search_to_nth_level( btr_cur_search_to_nth_level(
index, 0, entry, PAGE_CUR_LE, index, 0, entry, PAGE_CUR_LE,
BTR_MODIFY_TREE, &cursor, 0, BTR_MODIFY_TREE, &cursor, 0,
__FILE__, __LINE__, &mtr); __FILE__, __LINE__, &mtr, 0);
/* No other thread than the current one /* No other thread than the current one
is allowed to modify the index tree. is allowed to modify the index tree.
@@ -3254,7 +3254,7 @@ insert_the_rec:
btr_cur_search_to_nth_level( btr_cur_search_to_nth_level(
index, 0, entry, PAGE_CUR_LE, index, 0, entry, PAGE_CUR_LE,
BTR_MODIFY_TREE, &cursor, 0, BTR_MODIFY_TREE, &cursor, 0,
__FILE__, __LINE__, &mtr); __FILE__, __LINE__, &mtr, 0);
} }
/* We already determined that the /* We already determined that the

View File

@@ -46,6 +46,9 @@ Completed by Sunny Bains and Marko Makela
#include "row0vers.h" #include "row0vers.h"
#include "handler0alter.h" #include "handler0alter.h"
#include "btr0bulk.h" #include "btr0bulk.h"
#ifdef BTR_CUR_ADAPT
# include "btr0sea.h"
#endif /* BTR_CUR_ADAPT */
#include "ut0stage.h" #include "ut0stage.h"
#include "fil0crypt.h" #include "fil0crypt.h"
@@ -162,7 +165,7 @@ public:
PAGE_CUR_RTREE_INSERT, PAGE_CUR_RTREE_INSERT,
BTR_MODIFY_LEAF, &ins_cur, BTR_MODIFY_LEAF, &ins_cur,
0, __FILE__, __LINE__, 0, __FILE__, __LINE__,
&mtr); &mtr, 0);
/* It need to update MBR in parent entry, /* It need to update MBR in parent entry,
so change search mode to BTR_MODIFY_TREE */ so change search mode to BTR_MODIFY_TREE */
@@ -178,7 +181,7 @@ public:
m_index, 0, dtuple, m_index, 0, dtuple,
PAGE_CUR_RTREE_INSERT, PAGE_CUR_RTREE_INSERT,
BTR_MODIFY_TREE, &ins_cur, 0, BTR_MODIFY_TREE, &ins_cur, 0,
__FILE__, __LINE__, &mtr); __FILE__, __LINE__, &mtr, 0);
} }
error = btr_cur_optimistic_insert( error = btr_cur_optimistic_insert(
@@ -201,8 +204,7 @@ public:
PAGE_CUR_RTREE_INSERT, PAGE_CUR_RTREE_INSERT,
BTR_MODIFY_TREE, BTR_MODIFY_TREE,
&ins_cur, 0, &ins_cur, 0,
__FILE__, __LINE__, &mtr); __FILE__, __LINE__, &mtr, 0);
error = btr_cur_pessimistic_insert( error = btr_cur_pessimistic_insert(
flag, &ins_cur, &ins_offsets, flag, &ins_cur, &ins_offsets,
@@ -3770,6 +3772,9 @@ row_merge_drop_indexes(
we should exclude FTS entries from we should exclude FTS entries from
prebuilt->ins_node->entry_list prebuilt->ins_node->entry_list
in ins_node_create_entry_list(). */ in ins_node_create_entry_list(). */
#ifdef BTR_CUR_HASH_ADAPT
ut_ad(!index->search_info->ref_count);
#endif /* BTR_CUR_HASH_ADAPT */
dict_index_remove_from_cache( dict_index_remove_from_cache(
table, index); table, index);
index = prev; index = prev;

View File

@@ -2459,6 +2459,9 @@ row_create_index_for_mysql(
index->table = table; index->table = table;
err = dict_create_index_tree_in_mem(index, trx); err = dict_create_index_tree_in_mem(index, trx);
#ifdef BTR_CUR_HASH_ADAPT
ut_ad(!index->search_info->ref_count);
#endif /* BTR_CUR_HASH_ADAPT */
if (err != DB_SUCCESS) { if (err != DB_SUCCESS) {
dict_index_remove_from_cache(table, index); dict_index_remove_from_cache(table, index);
@@ -3372,35 +3375,6 @@ row_drop_table_for_mysql(
if (!dict_table_is_temporary(table)) { if (!dict_table_is_temporary(table)) {
if (table->space != TRX_SYS_SPACE) { if (table->space != TRX_SYS_SPACE) {
#ifdef BTR_CUR_HASH_ADAPT
/* On DISCARD TABLESPACE, we would not drop the
adaptive hash index entries. If the tablespace is
missing here, delete-marking the record in SYS_INDEXES
would not free any pages in the buffer pool. Thus,
dict_index_remove_from_cache() would hang due to
adaptive hash index entries existing in the buffer
pool. To prevent this hang, and also to guarantee
that btr_search_drop_page_hash_when_freed() will avoid
calling btr_search_drop_page_hash_index() while we
hold the InnoDB dictionary lock, we will drop any
adaptive hash index entries upfront. */
const bool immune = is_temp_name
|| create_failed
|| sqlcom == SQLCOM_CREATE_TABLE
|| strstr(table->name.m_name, "/FTS");
while (buf_LRU_drop_page_hash_for_tablespace(table)) {
if ((!immune && trx_is_interrupted(trx))
|| srv_shutdown_state
!= SRV_SHUTDOWN_NONE) {
err = DB_INTERRUPTED;
table->to_be_dropped = false;
dict_table_close(table, true, false);
goto funct_exit;
}
}
#endif /* BTR_CUR_HASH_ADAPT */
/* Delete the link file if used. */ /* Delete the link file if used. */
if (DICT_TF_HAS_DATA_DIR(table->flags)) { if (DICT_TF_HAS_DATA_DIR(table->flags)) {
RemoteDatafile::delete_link_file(name); RemoteDatafile::delete_link_file(name);

View File

@@ -2,7 +2,7 @@
Copyright (c) 1997, 2017, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 1997, 2017, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2008, Google Inc. Copyright (c) 2008, Google Inc.
Copyright (c) 2015, 2019, MariaDB Corporation. Copyright (c) 2015, 2020, MariaDB Corporation.
Portions of this file contain modifications contributed and copyrighted by Portions of this file contain modifications contributed and copyrighted by
Google, Inc. Those modifications are gratefully acknowledged and are described Google, Inc. Those modifications are gratefully acknowledged and are described
@@ -1289,23 +1289,17 @@ void
row_sel_open_pcur( row_sel_open_pcur(
/*==============*/ /*==============*/
plan_t* plan, /*!< in: table plan */ plan_t* plan, /*!< in: table plan */
ibool search_latch_locked, #ifdef BTR_CUR_HASH_ADAPT
/*!< in: TRUE if the thread currently ulint has_search_latch,
has the search latch locked in #endif
s-mode */
mtr_t* mtr) /*!< in: mtr */ mtr_t* mtr) /*!< in: mtr */
{ {
dict_index_t* index; dict_index_t* index;
func_node_t* cond; func_node_t* cond;
que_node_t* exp; que_node_t* exp;
ulint n_fields; ulint n_fields;
ulint has_search_latch = 0; /* RW_S_LATCH or 0 */
ulint i; ulint i;
if (search_latch_locked) {
has_search_latch = RW_S_LATCH;
}
index = plan->index; index = plan->index;
/* Calculate the value of the search tuple: the exact match columns /* Calculate the value of the search tuple: the exact match columns
@@ -1357,6 +1351,11 @@ row_sel_open_pcur(
plan->pcur_is_open = TRUE; plan->pcur_is_open = TRUE;
} }
#ifndef BTR_CUR_HASH_ADAPT
# define row_sel_open_pcur(plan, has_search_latch, mtr) \
row_sel_open_pcur(plan, mtr)
#endif /* !BTR_CUR_HASH_ADAPT */
/*********************************************************************//** /*********************************************************************//**
Restores a stored pcur position to a table index. Restores a stored pcur position to a table index.
@return TRUE if the cursor should be moved to the next record after we @return TRUE if the cursor should be moved to the next record after we
@@ -1618,12 +1617,6 @@ row_sel(
ut_ad(thr->run_node == node); ut_ad(thr->run_node == node);
#ifdef BTR_CUR_HASH_ADAPT
ibool search_latch_locked = FALSE;
#else /* BTR_CUR_HASH_ADAPT */
# define search_latch_locked false
#endif /* BTR_CUR_HASH_ADAPT */
if (node->read_view) { if (node->read_view) {
/* In consistent reads, we try to do with the hash index and /* In consistent reads, we try to do with the hash index and
not to use the buffer page get. This is to reduce memory bus not to use the buffer page get. This is to reduce memory bus
@@ -1648,6 +1641,10 @@ table_loop:
plan = sel_node_get_nth_plan(node, node->fetch_table); plan = sel_node_get_nth_plan(node, node->fetch_table);
index = plan->index; index = plan->index;
#ifdef BTR_CUR_HASH_ADAPT
ulint has_search_latch = 0;
rw_lock_t* const latch = btr_get_search_latch(index);
#endif /* BTR_CUR_HASH_ADAPT */
if (plan->n_rows_prefetched > 0) { if (plan->n_rows_prefetched > 0) {
sel_dequeue_prefetched_row(plan); sel_dequeue_prefetched_row(plan);
@@ -1672,26 +1669,22 @@ table_loop:
#ifdef BTR_CUR_HASH_ADAPT #ifdef BTR_CUR_HASH_ADAPT
if (consistent_read && plan->unique_search && !plan->pcur_is_open if (consistent_read && plan->unique_search && !plan->pcur_is_open
&& !plan->must_get_clust) { && !plan->must_get_clust) {
if (!search_latch_locked) { if (!has_search_latch) {
btr_search_s_lock(index); has_search_latch = RW_S_LATCH;
rw_lock_s_lock(latch);
search_latch_locked = TRUE; } else if (rw_lock_get_writer(latch) == RW_LOCK_X_WAIT) {
} else if (rw_lock_get_writer(btr_get_search_latch(index))
== RW_LOCK_X_WAIT) {
/* There is an x-latch request waiting: release the /* There is an x-latch request waiting: release the
s-latch for a moment; as an s-latch here is often s-latch for a moment; as an s-latch here is often
kept for some 10 searches before being released, kept for some 10 searches before being released,
a waiting x-latch request would block other threads a waiting x-latch request would block other threads
from acquiring an s-latch for a long time, lowering from acquiring an s-latch for a long time, lowering
performance significantly in multiprocessors. */ performance significantly in multiprocessors. */
rw_lock_s_unlock(latch);
btr_search_s_unlock(index); rw_lock_s_lock(latch);
btr_search_s_lock(index);
} }
switch (row_sel_try_search_shortcut(node, plan, switch (row_sel_try_search_shortcut(node, plan,
search_latch_locked, has_search_latch,
&mtr)) { &mtr)) {
case SEL_FOUND: case SEL_FOUND:
goto next_table; goto next_table;
@@ -1709,10 +1702,9 @@ table_loop:
mtr.start(); mtr.start();
} }
if (search_latch_locked) { if (has_search_latch) {
btr_search_s_unlock(index); has_search_latch = 0;
rw_lock_s_unlock(latch);
search_latch_locked = FALSE;
} }
#endif /* BTR_CUR_HASH_ADAPT */ #endif /* BTR_CUR_HASH_ADAPT */
@@ -1720,7 +1712,7 @@ table_loop:
/* Evaluate the expressions to build the search tuple and /* Evaluate the expressions to build the search tuple and
open the cursor */ open the cursor */
row_sel_open_pcur(plan, search_latch_locked, &mtr); row_sel_open_pcur(plan, has_search_latch, &mtr);
cursor_just_opened = TRUE; cursor_just_opened = TRUE;
@@ -2117,7 +2109,9 @@ skip_lock:
} }
next_rec: next_rec:
ut_ad(!search_latch_locked); #ifdef BTR_CUR_HASH_ADAPT
ut_ad(!has_search_latch);
#endif /* BTR_CUR_HASH_ADAPT */
if (mtr_has_extra_clust_latch) { if (mtr_has_extra_clust_latch) {
@@ -2156,8 +2150,9 @@ next_table:
plan->cursor_at_end = TRUE; plan->cursor_at_end = TRUE;
} else { } else {
ut_ad(!search_latch_locked); #ifdef BTR_CUR_HASH_ADAPT
ut_ad(!has_search_latch);
#endif /* BTR_CUR_HASH_ADAPT */
plan->stored_cursor_rec_processed = TRUE; plan->stored_cursor_rec_processed = TRUE;
btr_pcur_store_position(&(plan->pcur), &mtr); btr_pcur_store_position(&(plan->pcur), &mtr);
@@ -2248,8 +2243,9 @@ stop_for_a_while:
inserted new records which should have appeared in the result set, inserted new records which should have appeared in the result set,
which would result in the phantom problem. */ which would result in the phantom problem. */
ut_ad(!search_latch_locked); #ifdef BTR_CUR_HASH_ADAPT
ut_ad(!has_search_latch);
#endif /* BTR_CUR_HASH_ADAPT */
plan->stored_cursor_rec_processed = FALSE; plan->stored_cursor_rec_processed = FALSE;
btr_pcur_store_position(&(plan->pcur), &mtr); btr_pcur_store_position(&(plan->pcur), &mtr);
@@ -2266,7 +2262,9 @@ commit_mtr_for_a_while:
plan->stored_cursor_rec_processed = TRUE; plan->stored_cursor_rec_processed = TRUE;
ut_ad(!search_latch_locked); #ifdef BTR_CUR_HASH_ADAPT
ut_ad(!has_search_latch);
#endif /* BTR_CUR_HASH_ADAPT */
btr_pcur_store_position(&(plan->pcur), &mtr); btr_pcur_store_position(&(plan->pcur), &mtr);
mtr.commit(); mtr.commit();
@@ -2280,7 +2278,9 @@ lock_wait_or_error:
/* See the note at stop_for_a_while: the same holds for this case */ /* See the note at stop_for_a_while: the same holds for this case */
ut_ad(!btr_pcur_is_before_first_on_page(&plan->pcur) || !node->asc); ut_ad(!btr_pcur_is_before_first_on_page(&plan->pcur) || !node->asc);
ut_ad(!search_latch_locked); #ifdef BTR_CUR_HASH_ADAPT
ut_ad(!has_search_latch);
#endif /* BTR_CUR_HASH_ADAPT */
plan->stored_cursor_rec_processed = FALSE; plan->stored_cursor_rec_processed = FALSE;
btr_pcur_store_position(&(plan->pcur), &mtr); btr_pcur_store_position(&(plan->pcur), &mtr);
@@ -2289,8 +2289,8 @@ lock_wait_or_error:
func_exit: func_exit:
#ifdef BTR_CUR_HASH_ADAPT #ifdef BTR_CUR_HASH_ADAPT
if (search_latch_locked) { if (has_search_latch) {
btr_search_s_unlock(index); rw_lock_s_unlock(latch);
} }
#endif /* BTR_CUR_HASH_ADAPT */ #endif /* BTR_CUR_HASH_ADAPT */
ut_ad(!sync_check_iterate(dict_sync_check())); ut_ad(!sync_check_iterate(dict_sync_check()));
@@ -4460,7 +4460,6 @@ row_search_mvcc(
&& !prebuilt->templ_contains_blob && !prebuilt->templ_contains_blob
&& !prebuilt->used_in_HANDLER && !prebuilt->used_in_HANDLER
&& (prebuilt->mysql_row_len < UNIV_PAGE_SIZE / 8)) { && (prebuilt->mysql_row_len < UNIV_PAGE_SIZE / 8)) {
mode = PAGE_CUR_GE; mode = PAGE_CUR_GE;
if (trx->mysql_n_tables_locked == 0 if (trx->mysql_n_tables_locked == 0
@@ -4480,7 +4479,8 @@ row_search_mvcc(
and if we try that, we can deadlock on the adaptive and if we try that, we can deadlock on the adaptive
hash index semaphore! */ hash index semaphore! */
rw_lock_s_lock(btr_get_search_latch(index)); rw_lock_t* const latch = btr_get_search_latch(index);
rw_lock_s_lock(latch);
switch (row_sel_try_search_shortcut_for_mysql( switch (row_sel_try_search_shortcut_for_mysql(
&rec, prebuilt, &offsets, &heap, &rec, prebuilt, &offsets, &heap,
@@ -4534,7 +4534,7 @@ row_search_mvcc(
err = DB_SUCCESS; err = DB_SUCCESS;
rw_lock_s_unlock(btr_get_search_latch(index)); rw_lock_s_unlock(latch);
goto func_exit; goto func_exit;
@@ -4544,7 +4544,7 @@ row_search_mvcc(
err = DB_RECORD_NOT_FOUND; err = DB_RECORD_NOT_FOUND;
rw_lock_s_unlock(btr_get_search_latch(index)); rw_lock_s_unlock(latch);
/* NOTE that we do NOT store the cursor /* NOTE that we do NOT store the cursor
position */ position */
@@ -4561,7 +4561,7 @@ row_search_mvcc(
mtr.commit(); mtr.commit();
mtr.start(); mtr.start();
rw_lock_s_unlock(btr_get_search_latch(index)); rw_lock_s_unlock(latch);
} }
} }
#endif /* BTR_CUR_HASH_ADAPT */ #endif /* BTR_CUR_HASH_ADAPT */

View File

@@ -1,7 +1,7 @@
/***************************************************************************** /*****************************************************************************
Copyright (c) 2013, 2018, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2013, 2018, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2017, 2019, MariaDB Corporation. Copyright (c) 2017, 2020, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software the terms of the GNU General Public License as published by the Free Software
@@ -39,6 +39,7 @@ Created 2013-04-12 Sunny Bains
#include "os0file.h" #include "os0file.h"
#include "que0que.h" #include "que0que.h"
#include "trx0undo.h" #include "trx0undo.h"
#include "btr0sea.h"
/* FIXME: For temporary tables, use a simple approach of btr_free() /* FIXME: For temporary tables, use a simple approach of btr_free()
and btr_create() of each index tree. */ and btr_create() of each index tree. */
@@ -1953,7 +1954,6 @@ dberr_t row_truncate_table_for_mysql(dict_table_t* table, trx_t* trx)
for (dict_index_t* index = UT_LIST_GET_FIRST(table->indexes); for (dict_index_t* index = UT_LIST_GET_FIRST(table->indexes);
index != NULL; index != NULL;
index = UT_LIST_GET_NEXT(indexes, index)) { index = UT_LIST_GET_NEXT(indexes, index)) {
err = dict_truncate_index_tree_in_mem(index); err = dict_truncate_index_tree_in_mem(index);
if (err != DB_SUCCESS) { if (err != DB_SUCCESS) {
@@ -2003,6 +2003,15 @@ dberr_t row_truncate_table_for_mysql(dict_table_t* table, trx_t* trx)
os_thread_sleep(2000000); os_thread_sleep(2000000);
DBUG_SUICIDE();); DBUG_SUICIDE(););
#ifdef BTR_CUR_HASH_ADAPT
dict_table_x_unlock_indexes(table);
for (dict_index_t* index = UT_LIST_GET_FIRST(table->indexes); index;
index = UT_LIST_GET_NEXT(indexes, index)) {
index = index->clone_if_needed();
}
dict_table_x_lock_indexes(table);
#endif /* BTR_CUR_HASH_ADAPT */
/* Step-10: Re-create new indexes. */ /* Step-10: Re-create new indexes. */
if (!dict_table_is_temporary(table)) { if (!dict_table_is_temporary(table)) {

View File

@@ -1,7 +1,7 @@
/***************************************************************************** /*****************************************************************************
Copyright (c) 1996, 2017, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 1996, 2017, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2017, 2019, MariaDB Corporation. Copyright (c) 2017, 2020, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software the terms of the GNU General Public License as published by the Free Software
@@ -383,7 +383,7 @@ trx_purge_free_segment(trx_rseg_t* rseg, fil_addr_t hdr_addr)
} }
if (fseg_free_step_not_header( if (fseg_free_step_not_header(
seg_hdr + TRX_UNDO_FSEG_HEADER, false, &mtr)) { seg_hdr + TRX_UNDO_FSEG_HEADER, &mtr)) {
break; break;
} }
@@ -413,7 +413,7 @@ trx_purge_free_segment(trx_rseg_t* rseg, fil_addr_t hdr_addr)
is not flooded with bufferfixed pages: see the note in is not flooded with bufferfixed pages: see the note in
fsp0fsp.cc. */ fsp0fsp.cc. */
} while (!fseg_free_step(seg_hdr + TRX_UNDO_FSEG_HEADER, false, &mtr)); } while (!fseg_free_step(seg_hdr + TRX_UNDO_FSEG_HEADER, &mtr));
hist_size = mtr_read_ulint(rseg_hdr + TRX_RSEG_HISTORY_SIZE, hist_size = mtr_read_ulint(rseg_hdr + TRX_RSEG_HISTORY_SIZE,
MLOG_4BYTES, &mtr); MLOG_4BYTES, &mtr);

View File

@@ -869,7 +869,7 @@ trx_undo_free_page(
undo_page + TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_NODE, mtr); undo_page + TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_NODE, mtr);
fseg_free_page(header_page + TRX_UNDO_SEG_HDR + TRX_UNDO_FSEG_HEADER, fseg_free_page(header_page + TRX_UNDO_SEG_HDR + TRX_UNDO_FSEG_HEADER,
space, page_no, false, mtr); space, page_no, mtr);
last_addr = flst_get_last(header_page + TRX_UNDO_SEG_HDR last_addr = flst_get_last(header_page + TRX_UNDO_SEG_HDR
+ TRX_UNDO_PAGE_LIST, mtr); + TRX_UNDO_PAGE_LIST, mtr);
@@ -1092,7 +1092,7 @@ trx_undo_seg_free(
file_seg = seg_header + TRX_UNDO_FSEG_HEADER; file_seg = seg_header + TRX_UNDO_FSEG_HEADER;
finished = fseg_free_step(file_seg, false, &mtr); finished = fseg_free_step(file_seg, &mtr);
if (finished) { if (finished) {
/* Update the rseg header */ /* Update the rseg header */