Merge 10.2 into 10.3
@@ -1,32 +0,0 @@
include/master-slave.inc
[connection master]
create table t1 (a int) engine=innodb;
create table t2 (b longblob) engine=innodb;
create table t3 (c int) engine=innodb;
insert into t2 values (repeat('b',1024*1024));
insert into t2 select * from t2;
insert into t2 select * from t2;
insert into t2 select * from t2;
insert into t2 select * from t2;
set debug_sync='rm_table_no_locks_before_delete_table SIGNAL nogo WAIT_FOR go EXECUTE 2';
drop table t1, t2, t3;
connect foo,localhost,root;
set debug_sync='now SIGNAL go';
kill query CONNECTION_ID;
connection master;
ERROR 70100: Query execution was interrupted
"Tables t2 and t3 should be listed"
SHOW TABLES;
Tables_in_test
t2
t3
include/show_binlog_events.inc
Log_name Pos Event_type Server_id End_log_pos Info
master-bin.000001 # Gtid # # GTID #-#-#
master-bin.000001 # Query # # use `test`; DROP TABLE `t1` /* generated by server */
connection slave;
drop table t2, t3;
connection master;
set debug_sync='RESET';
drop table t2, t3;
include/rpl_end.inc
@@ -1,64 +0,0 @@
# ==== Purpose ====
#
# Check that when the execution of a DROP TABLE command with single table
# fails it should not be written to the binary log. Also test that when the
# execution of DROP TABLE command with multiple tables fails the command
# should be written into the binary log.
#
# ==== Implementation ====
#
# Steps:
#    0 - Create tables named t1, t2, t3
#    1 - Execute DROP TABLE t1,t2,t3 command.
#    2 - Kill the DROP TABLE command while it is trying to drop table 't2'.
#    3 - Verify that tables t2,t3 are present after the DROP command execution
#        was interrupted.
#    4 - Check that table 't1' is present in binary log as part of DROP
#        command.
#
# ==== References ====
#
# MDEV-20348: DROP TABLE IF EXISTS killed on master but was replicated.
#

--source include/have_innodb.inc
--source include/have_debug_sync.inc
--source include/have_binlog_format_statement.inc
--source include/master-slave.inc

create table t1 (a int) engine=innodb;
create table t2 (b longblob) engine=innodb;
create table t3 (c int) engine=innodb;
insert into t2 values (repeat('b',1024*1024));
insert into t2 select * from t2;
insert into t2 select * from t2;
insert into t2 select * from t2;
insert into t2 select * from t2;
let $binlog_start= query_get_value(SHOW MASTER STATUS, Position, 1);

let $id=`select connection_id()`;
set debug_sync='rm_table_no_locks_before_delete_table SIGNAL nogo WAIT_FOR go EXECUTE 2';
send drop table t1, t2, t3;

connect foo,localhost,root;
set debug_sync='now SIGNAL go';
let $wait_condition=select 1 from information_schema.processlist where state like 'debug sync point:%';
source include/wait_condition.inc;
--replace_result $id CONNECTION_ID
eval kill query $id;

connection master;
error ER_QUERY_INTERRUPTED;
reap;

--echo "Tables t2 and t3 should be listed"
SHOW TABLES;
--source include/show_binlog_events.inc
--sync_slave_with_master
drop table t2, t3;

connection master;
set debug_sync='RESET';
drop table t2, t3;

source include/rpl_end.inc;
@@ -2,7 +2,7 @@

Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2012, Facebook Inc.
Copyright (c) 2014, 2019, MariaDB Corporation.
Copyright (c) 2014, 2020, MariaDB Corporation.

This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -722,8 +722,10 @@ void btr_page_free(dict_index_t* index, buf_block_t* block, mtr_t* mtr,
{
ut_ad(mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX));
#ifdef BTR_CUR_HASH_ADAPT
ut_ad(!block->index || !blob);
ut_ad(!block->index || page_is_leaf(block->frame));
if (block->index && !block->index->freed()) {
ut_ad(!blob);
ut_ad(page_is_leaf(block->frame));
}
#endif
ut_ad(index->table->space_id == block->page.id.space());
/* The root page is freed by btr_free_root(). */
@@ -748,8 +750,7 @@ void btr_page_free(dict_index_t* index, buf_block_t* block, mtr_t* mtr,
? PAGE_HEADER + PAGE_BTR_SEG_LEAF
: PAGE_HEADER + PAGE_BTR_SEG_TOP];
fseg_free_page(seg_header,
index->table->space, block->page.id.page_no(),
block->index != NULL, mtr);
index->table->space, block->page.id.page_no(), mtr);

/* The page was marked free in the allocation bitmap, but it
should remain exclusively latched until mtr_t::commit() or until it
@@ -1004,7 +1005,7 @@ static void btr_free_root(buf_block_t* block, mtr_t* mtr, bool invalidate)
BTR_FREED_INDEX_ID, mtr);
}

while (!fseg_free_step(header, true, mtr)) {
while (!fseg_free_step(header, mtr)) {
/* Free the entire segment in small steps. */
}
}
@@ -1251,7 +1252,7 @@ leaf_loop:
fsp0fsp. */

finished = fseg_free_step(root + PAGE_HEADER + PAGE_BTR_SEG_LEAF,
true, &mtr);
&mtr);
mtr_commit(&mtr);

if (!finished) {
@@ -1271,7 +1272,7 @@ top_loop:
#endif /* UNIV_BTR_DEBUG */

finished = fseg_free_step_not_header(
root + PAGE_HEADER + PAGE_BTR_SEG_TOP, true, &mtr);
root + PAGE_HEADER + PAGE_BTR_SEG_TOP, &mtr);
mtr_commit(&mtr);

if (!finished) {

@@ -1,7 +1,7 @@
/*****************************************************************************

Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2016, 2019, MariaDB Corporation.
Copyright (c) 2016, 2020, MariaDB Corporation.

This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software

@@ -2,7 +2,7 @@

Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2008, Google Inc.
Copyright (c) 2017, 2019, MariaDB Corporation.
Copyright (c) 2017, 2020, MariaDB Corporation.

Portions of this file contain modifications contributed and copyrighted by
Google, Inc. Those modifications are gratefully acknowledged and are described
@@ -191,15 +191,8 @@ static
void
btr_search_check_free_space_in_heap(const dict_index_t* index)
{
hash_table_t* table;
mem_heap_t* heap;

ut_ad(!btr_search_own_any(RW_LOCK_S));
ut_ad(!btr_search_own_any(RW_LOCK_X));

table = btr_get_search_table(index);

heap = table->heap;
hash_table_t* table = btr_get_search_table(index);
mem_heap_t* heap = table->heap;

/* Note that we peek the value of heap->free_block without reserving
the latch: this is ok, because we will not guarantee that there will
@@ -342,8 +335,6 @@ btr_search_disable_ref_count(
{
dict_index_t* index;

ut_ad(mutex_own(&dict_sys->mutex));

for (index = dict_table_get_first_index(table);
index != NULL;
index = dict_table_get_next_index(index)) {
@@ -351,6 +342,81 @@ btr_search_disable_ref_count(
}
}

/** Lazily free detached metadata when removing the last reference. */
ATTRIBUTE_COLD static void btr_search_lazy_free(dict_index_t *index)
{
ut_ad(index->freed());
dict_table_t *table= index->table;
/* Perform the skipped steps of dict_index_remove_from_cache_low(). */
UT_LIST_REMOVE(table->freed_indexes, index);
rw_lock_free(&index->lock);
dict_mem_index_free(index);

if (!UT_LIST_GET_LEN(table->freed_indexes) &&
!UT_LIST_GET_LEN(table->indexes))
{
ut_ad(table->id == 0);
dict_mem_table_free(table);
}
}

/** Clear the adaptive hash index on all pages in the buffer pool. */
static void buf_pool_clear_hash_index()
{
ut_ad(btr_search_own_all(RW_LOCK_X));
ut_ad(!btr_search_enabled);

std::set<dict_index_t*> garbage;

for (ulong p = 0; p < srv_buf_pool_instances; p++)
{
buf_pool_t *buf_pool= buf_pool_from_array(p);
buf_chunk_t *chunks= buf_pool->chunks;
buf_chunk_t *chunk= chunks + buf_pool->n_chunks;

while (--chunk >= chunks)
{
buf_block_t *block= chunk->blocks;
for (ulint i= chunk->size; i--; block++)
{
dict_index_t *index= block->index;
assert_block_ahi_valid(block);

/* We can clear block->index and block->n_pointers when
btr_search_own_all(RW_LOCK_X); see the comments in buf0buf.h */

if (!index)
{
# if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
ut_a(!block->n_pointers);
# endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
continue;
}

ut_d(buf_page_state state= buf_block_get_state(block));
/* Another thread may have set the state to
BUF_BLOCK_REMOVE_HASH in buf_LRU_block_remove_hashed().

The state change in buf_page_realloc() is not observable here,
because in that case we would have !block->index.

In the end, the entire adaptive hash index will be removed. */
ut_ad(state == BUF_BLOCK_FILE_PAGE || state == BUF_BLOCK_REMOVE_HASH);
# if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
block->n_pointers= 0;
# endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
if (index->freed())
garbage.insert(index);
block->index= NULL;
}
}
}

for (std::set<dict_index_t*>::iterator i= garbage.begin();
i != garbage.end(); i++)
btr_search_lazy_free(*i);
}

/** Disable the adaptive hash search system and empty the index.
@param[in] need_mutex need to acquire dict_sys->mutex */
void btr_search_disable(bool need_mutex)
@@ -420,31 +486,6 @@ void btr_search_enable()
btr_search_x_unlock_all();
}

/** Returns the value of ref_count. The value is protected by latch.
@param[in] info search info
@param[in] index index identifier
@return ref_count value. */
ulint
btr_search_info_get_ref_count(
btr_search_t* info,
dict_index_t* index)
{
ulint ret = 0;

if (!btr_search_enabled) {
return(ret);
}

ut_ad(info);

rw_lock_t* ahi_latch = btr_get_search_latch(index);
rw_lock_s_lock(ahi_latch);
ret = info->ref_count;
rw_lock_s_unlock(ahi_latch);

return(ret);
}

/** Updates the search info of an index about hash successes. NOTE that info
is NOT protected by any semaphore, to save CPU time! Do not assume its fields
are consistent.
@@ -641,28 +682,25 @@ btr_search_update_hash_ref(
buf_block_t* block,
const btr_cur_t* cursor)
{
dict_index_t* index;
ulint fold;
rec_t* rec;

ut_ad(cursor->flag == BTR_CUR_HASH_FAIL);
ut_ad(rw_lock_own(btr_get_search_latch(cursor->index), RW_LOCK_X));

ut_ad(rw_lock_own_flagged(&block->lock,
RW_LOCK_FLAG_X | RW_LOCK_FLAG_S));
ut_ad(page_align(btr_cur_get_rec(cursor)) == block->frame);
ut_ad(page_is_leaf(block->frame));
assert_block_ahi_valid(block);

index = block->index;
dict_index_t* index = block->index;

if (!index) {

return;
}

ut_ad(block->page.id.space() == index->table->space_id);
ut_ad(index == cursor->index);
ut_ad(!dict_index_is_ibuf(index));
rw_lock_t* const latch = btr_get_search_latch(index);
rw_lock_x_lock(latch);

if ((info->n_hash_potential > 0)
&& (block->curr_n_fields == info->n_fields)
@@ -672,18 +710,18 @@ btr_search_update_hash_ref(
rec_offs offsets_[REC_OFFS_NORMAL_SIZE];
rec_offs_init(offsets_);

rec = btr_cur_get_rec(cursor);
const rec_t* rec = btr_cur_get_rec(cursor);

if (!page_rec_is_user_rec(rec)) {

return;
goto func_exit;
}

fold = rec_fold(rec,
rec_get_offsets(rec, index, offsets_, true,
ULINT_UNDEFINED, &heap),
block->curr_n_fields,
block->curr_n_bytes, index->id);
ulint fold = rec_fold(
rec,
rec_get_offsets(rec, index, offsets_, true,
ULINT_UNDEFINED, &heap),
block->curr_n_fields,
block->curr_n_bytes, index->id);
if (UNIV_LIKELY_NULL(heap)) {
mem_heap_free(heap);
}
@@ -693,6 +731,9 @@ btr_search_update_hash_ref(

MONITOR_INC(MONITOR_ADAPTIVE_HASH_ROW_ADDED);
}

func_exit:
rw_lock_x_unlock(latch);
}

/** Checks if a guessed position for a tree cursor is right. Note that if
@@ -873,7 +914,6 @@ btr_search_guess_on_hash(
rw_lock_t* ahi_latch,
mtr_t* mtr)
{
const rec_t* rec;
ulint fold;
index_id_t index_id;
#ifdef notdefined
@@ -884,7 +924,7 @@ btr_search_guess_on_hash(
ahi_latch, RW_LOCK_FLAG_X | RW_LOCK_FLAG_S));

if (!btr_search_enabled) {
return(FALSE);
return false;
}

ut_ad(index && info && tuple && cursor && mtr);
@@ -900,16 +940,14 @@ btr_search_guess_on_hash(
any latch here! */

if (info->n_hash_potential == 0) {

return(FALSE);
return false;
}

cursor->n_fields = info->n_fields;
cursor->n_bytes = info->n_bytes;

if (dtuple_get_n_fields(tuple) < btr_search_get_n_fields(cursor)) {

return(FALSE);
return false;
}

index_id = index->id;
@@ -923,6 +961,7 @@ btr_search_guess_on_hash(
cursor->flag = BTR_CUR_HASH;

rw_lock_t* use_latch = ahi_latch ? NULL : btr_get_search_latch(index);
const rec_t* rec;

if (use_latch) {
rw_lock_s_lock(use_latch);
@@ -935,47 +974,55 @@ btr_search_guess_on_hash(
ut_ad(rw_lock_own(ahi_latch, RW_LOCK_S));
}

rec = (rec_t*) ha_search_and_get_data(
btr_get_search_table(index), fold);
rec = static_cast<const rec_t*>(
ha_search_and_get_data(btr_get_search_table(index), fold));

if (rec == NULL) {
if (!rec) {
if (use_latch) {
fail:
rw_lock_s_unlock(use_latch);
}

btr_search_failure(info, cursor);

return(FALSE);
return false;
}

buf_block_t* block = buf_block_from_ahi(rec);

if (use_latch) {

if (!buf_page_get_known_nowait(
latch_mode, block, BUF_MAKE_YOUNG,
__FILE__, __LINE__, mtr)) {
goto fail;
}

const bool fail = index != block->index
&& index_id == block->index->id;
ut_a(!fail || block->index->freed());
rw_lock_s_unlock(use_latch);

buf_block_dbg_add_level(block, SYNC_TREE_NODE_FROM_HASH);
if (UNIV_UNLIKELY(fail)) {
btr_search_drop_page_hash_index(block);
goto fail_and_release_page;
}
} else if (UNIV_UNLIKELY(index != block->index
&& index_id == block->index->id)) {
ut_a(block->index->freed());
goto fail_and_release_page;
}

if (buf_block_get_state(block) != BUF_BLOCK_FILE_PAGE) {

ut_ad(buf_block_get_state(block) == BUF_BLOCK_REMOVE_HASH);

fail_and_release_page:
if (!ahi_latch) {

btr_leaf_page_release(block, latch_mode, mtr);
}

btr_search_failure(info, cursor);

return(FALSE);
return false;
}

ut_ad(page_rec_is_user_rec(rec));
@@ -991,14 +1038,7 @@ fail:
right. */
if (index_id != btr_page_get_index_id(block->frame)
|| !btr_search_check_guess(cursor, !!ahi_latch, tuple, mode)) {

if (!ahi_latch) {
btr_leaf_page_release(block, latch_mode, mtr);
}

btr_search_failure(info, cursor);

return(FALSE);
goto fail_and_release_page;
}

if (info->n_hash_potential < BTR_SEARCH_BUILD_LIMIT + 5) {
@@ -1059,7 +1099,7 @@ fail:
++buf_pool->stat.n_page_gets;
}

return(TRUE);
return true;
}

/** Drop any adaptive hash index entries that point to an index page.
@@ -1081,32 +1121,28 @@ void btr_search_drop_page_hash_index(buf_block_t* block)
ulint* folds;
ulint i;
mem_heap_t* heap;
const dict_index_t* index;
rec_offs* offsets;
rw_lock_t* latch;
btr_search_t* info;

retry:
/* Do a dirty check on block->index, return if the block is
not in the adaptive hash index. */
index = block->index;
/* This debug check uses a dirty read that could theoretically cause
false positives while buf_pool_clear_hash_index() is executing. */
assert_block_ahi_valid(block);
ut_ad(!btr_search_own_any(RW_LOCK_S));
ut_ad(!btr_search_own_any(RW_LOCK_X));

if (index == NULL) {
if (!block->index) {
return;
}

ut_ad(block->page.buf_fix_count == 0
|| buf_block_get_state(block) == BUF_BLOCK_REMOVE_HASH
|| rw_lock_own_flagged(&block->lock,
RW_LOCK_FLAG_X | RW_LOCK_FLAG_S));
RW_LOCK_FLAG_X | RW_LOCK_FLAG_S
| RW_LOCK_FLAG_SX));
ut_ad(page_is_leaf(block->frame));

/* We must not dereference index here, because it could be freed
/* We must not dereference block->index here, because it could be freed
if (index->table->n_ref_count == 0 && !mutex_own(&dict_sys->mutex)).
Determine the ahi_slot based on the block contents. */

@@ -1121,18 +1157,12 @@ retry:
rw_lock_s_lock(latch);
assert_block_ahi_valid(block);

if (block->index == NULL) {
if (!block->index) {
rw_lock_s_unlock(latch);
return;
}

/* The index associated with a block must remain the
same, because we are holding block->lock or the block is
not accessible by other threads (BUF_BLOCK_REMOVE_HASH),
or the index is not accessible to other threads
(buf_fix_count == 0 when DROP TABLE or similar is executing
buf_LRU_drop_page_hash_for_tablespace()). */
ut_a(index == block->index);
dict_index_t* index = block->index;
#ifdef MYSQL_INDEX_DISABLE_AHI
ut_ad(!index->disable_ahi);
#endif
@@ -1140,7 +1170,7 @@ retry:

ut_ad(block->page.id.space() == index->table->space_id);
ut_a(index_id == index->id);
ut_a(!dict_index_is_ibuf(index));
ut_ad(!dict_index_is_ibuf(index));
#ifdef UNIV_DEBUG
switch (dict_index_get_online_status(index)) {
case ONLINE_INDEX_CREATION:
@@ -1248,9 +1278,14 @@ next_rec:
folds[i], page);
}

info = btr_search_get_info(block->index);
ut_a(info->ref_count > 0);
info->ref_count--;
switch (index->search_info->ref_count--) {
case 0:
ut_error;
case 1:
if (index->freed()) {
btr_search_lazy_free(index);
}
}

block->index = NULL;

@@ -1351,11 +1386,12 @@ btr_search_build_page_hash_index(
ut_ad(ahi_latch == btr_get_search_latch(index));
ut_ad(index);
ut_ad(block->page.id.space() == index->table->space_id);
ut_a(!dict_index_is_ibuf(index));
ut_ad(!dict_index_is_ibuf(index));
ut_ad(page_is_leaf(block->frame));

ut_ad(rw_lock_own_flagged(&block->lock,
RW_LOCK_FLAG_X | RW_LOCK_FLAG_S));
ut_ad(block->page.id.page_no() >= 3);

rw_lock_s_lock(ahi_latch);

@@ -1548,11 +1584,7 @@ btr_search_info_update_slow(btr_search_t* info, btr_cur_t* cursor)
btr_search_n_hash_fail++;
#endif /* UNIV_SEARCH_PERF_STAT */

rw_lock_x_lock(ahi_latch);

btr_search_update_hash_ref(info, block, cursor);

rw_lock_x_unlock(ahi_latch);
}

if (build_index) {
@@ -1670,7 +1702,7 @@ void btr_search_update_hash_on_delete(btr_cur_t* cursor)
ut_ad(block->page.id.space() == index->table->space_id);
ut_a(index == cursor->index);
ut_a(block->curr_n_fields > 0 || block->curr_n_bytes > 0);
ut_a(!dict_index_is_ibuf(index));
ut_ad(!dict_index_is_ibuf(index));

table = btr_get_search_table(index);

@@ -1741,7 +1773,7 @@ btr_search_update_hash_node_on_insert(btr_cur_t* cursor, rw_lock_t* ahi_latch)
}

ut_a(cursor->index == index);
ut_a(!dict_index_is_ibuf(index));
ut_ad(!dict_index_is_ibuf(index));
rw_lock_x_lock(ahi_latch);

if (!block->index) {
@@ -1794,8 +1826,6 @@ btr_search_update_hash_on_insert(btr_cur_t* cursor, rw_lock_t* ahi_latch)
ulint next_fold = 0; /* remove warning (??? bug ???) */
ulint n_fields;
ulint n_bytes;
ibool left_side;
bool locked = false;
mem_heap_t* heap = NULL;
rec_offs offsets_[REC_OFFS_NORMAL_SIZE];
rec_offs* offsets = offsets_;
@@ -1835,11 +1865,11 @@ btr_search_update_hash_on_insert(btr_cur_t* cursor, rw_lock_t* ahi_latch)
ut_a(!index->disable_ahi);
#endif
ut_a(index == cursor->index);
ut_a(!dict_index_is_ibuf(index));
ut_ad(!dict_index_is_ibuf(index));

n_fields = block->curr_n_fields;
n_bytes = block->curr_n_bytes;
left_side = block->curr_left_side;
const bool left_side = block->curr_left_side;

ins_rec = page_rec_get_next_const(rec);
next_rec = page_rec_get_next_const(ins_rec);
@@ -1856,6 +1886,8 @@ btr_search_update_hash_on_insert(btr_cur_t* cursor, rw_lock_t* ahi_latch)
n_bytes, index->id);
}

bool locked = false;

if (!page_rec_is_infimum(rec) && !rec_is_metadata(rec, index)) {
offsets = rec_get_offsets(
rec, index, offsets, true,
@@ -1914,7 +1946,6 @@ check_next_rec:
}

if (ins_fold != next_fold) {

if (!locked) {
locked = true;
rw_lock_x_lock(ahi_latch);
@@ -2044,7 +2075,7 @@ btr_search_hash_table_validate(ulint hash_table_id)
== BUF_BLOCK_REMOVE_HASH);
}

ut_a(!dict_index_is_ibuf(block->index));
ut_ad(!dict_index_is_ibuf(block->index));
ut_ad(block->page.id.space()
== block->index->table->space_id);

@@ -3209,66 +3209,6 @@ DECLARE_THREAD(buf_resize_thread)(void*)
OS_THREAD_DUMMY_RETURN;
}

#ifdef BTR_CUR_HASH_ADAPT
/** Clear the adaptive hash index on all pages in the buffer pool. */
void
buf_pool_clear_hash_index()
{
ulint p;

ut_ad(btr_search_own_all(RW_LOCK_X));
ut_ad(!buf_pool_resizing);
ut_ad(!btr_search_enabled);

for (p = 0; p < srv_buf_pool_instances; p++) {
buf_pool_t* buf_pool = buf_pool_from_array(p);
buf_chunk_t* chunks = buf_pool->chunks;
buf_chunk_t* chunk = chunks + buf_pool->n_chunks;

while (--chunk >= chunks) {
buf_block_t* block = chunk->blocks;
ulint i = chunk->size;

for (; i--; block++) {
dict_index_t* index = block->index;
assert_block_ahi_valid(block);

/* We can set block->index = NULL
and block->n_pointers = 0
when btr_search_own_all(RW_LOCK_X);
see the comments in buf0buf.h */

if (!index) {
# if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
ut_a(!block->n_pointers);
# endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
continue;
}

ut_d(buf_page_state state
= buf_block_get_state(block));
/* Another thread may have set the
state to BUF_BLOCK_REMOVE_HASH in
buf_LRU_block_remove_hashed().

The state change in buf_page_realloc()
is not observable here, because in
that case we would have !block->index.

In the end, the entire adaptive hash
index will be removed. */
ut_ad(state == BUF_BLOCK_FILE_PAGE
|| state == BUF_BLOCK_REMOVE_HASH);
# if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
block->n_pointers = 0;
# endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
block->index = NULL;
}
}
}
}
#endif /* BTR_CUR_HASH_ADAPT */

/********************************************************************//**
Relocate a buffer control block. Relocates the block on the LRU list
and in buf_pool->page_hash. Does not relocate bpage->list.
@@ -4254,7 +4194,7 @@ static buf_block_t* buf_page_mtr_lock(buf_block_t *block,
{
case RW_NO_LATCH:
fix_type= MTR_MEMO_BUF_FIX;
break;
goto done;
case RW_S_LATCH:
rw_lock_s_lock_inline(&block->lock, 0, file, line);
fix_type= MTR_MEMO_PAGE_S_FIX;
@@ -4270,6 +4210,15 @@ static buf_block_t* buf_page_mtr_lock(buf_block_t *block,
break;
}

#ifdef BTR_CUR_HASH_ADAPT
{
dict_index_t *index= block->index;
if (index && index->freed())
btr_search_drop_page_hash_index(block);
}
#endif /* BTR_CUR_HASH_ADAPT */

done:
mtr_memo_push(mtr, block, fix_type);
return block;
}
@@ -4592,6 +4541,7 @@ evict_from_pool:
buf_pool_mutex_exit(buf_pool);
return(NULL);
}

break;

case BUF_BLOCK_ZIP_PAGE:
@@ -5135,9 +5085,11 @@ buf_page_get_known_nowait(

buf_pool = buf_pool_from_block(block);

#ifdef BTR_CUR_HASH_ADAPT
if (mode == BUF_MAKE_YOUNG) {
buf_page_make_young_if_needed(&block->page);
}
#endif /* BTR_CUR_HASH_ADAPT */

ut_ad(!ibuf_inside(mtr) || mode == BUF_KEEP_OLD);

@@ -5180,9 +5132,12 @@ buf_page_get_known_nowait(
deleting a record from SYS_INDEXES. This check will be
skipped in recv_recover_page() as well. */

buf_page_mutex_enter(block);
ut_a(!block->page.file_page_was_freed);
buf_page_mutex_exit(block);
# ifdef BTR_CUR_HASH_ADAPT
ut_ad(!block->page.file_page_was_freed
|| (block->index && block->index->freed()));
# else /* BTR_CUR_HASH_ADAPT */
ut_ad(!block->page.file_page_was_freed);
# endif /* BTR_CUR_HASH_ADAPT */
}
#endif /* UNIV_DEBUG */

@@ -5673,6 +5628,12 @@ buf_page_create(
rw_lock_x_unlock(hash_lock);

buf_block_free(free_block);
#ifdef BTR_CUR_HASH_ADAPT
if (block->page.state == BUF_BLOCK_FILE_PAGE
&& UNIV_LIKELY_NULL(block->index)) {
btr_search_drop_page_hash_index(block);
}
#endif /* BTR_CUR_HASH_ADAPT */

if (!recv_recovery_is_on()) {
return buf_page_get_with_no_latch(page_id, page_size,

@@ -219,166 +219,6 @@ buf_LRU_evict_from_unzip_LRU(
}

#ifdef BTR_CUR_HASH_ADAPT
/** Attempts to drop page hash index on a batch of pages belonging to a
particular space id.
@param[in] space_id space id
@param[in] arr array of page_no
@param[in] count number of entries in array */
static
void
buf_LRU_drop_page_hash_batch(ulint space_id, const ulint* arr, ulint count)
{
ut_ad(count <= BUF_LRU_DROP_SEARCH_SIZE);

for (const ulint* const end = arr + count; arr != end; ) {
/* While our only caller
buf_LRU_drop_page_hash_for_tablespace()
is being executed for DROP TABLE or similar,
the table cannot be evicted from the buffer pool. */
btr_search_drop_page_hash_when_freed(
page_id_t(space_id, *arr++));
}
}

/******************************************************************//**
When doing a DROP TABLE/DISCARD TABLESPACE we have to drop all page
hash index entries belonging to that table. This function tries to
do that in batch. Note that this is a 'best effort' attempt and does
not guarantee that ALL hash entries will be removed. */
static
void
buf_LRU_drop_page_hash_for_tablespace(
/*==================================*/
buf_pool_t* buf_pool, /*!< in: buffer pool instance */
ulint id) /*!< in: space id */
{
ulint* page_arr = static_cast<ulint*>(ut_malloc_nokey(
sizeof(ulint) * BUF_LRU_DROP_SEARCH_SIZE));

ulint num_entries = 0;

buf_pool_mutex_enter(buf_pool);

scan_again:
for (buf_page_t* bpage = UT_LIST_GET_LAST(buf_pool->LRU);
bpage != NULL;
/* No op */) {

buf_page_t* prev_bpage = UT_LIST_GET_PREV(LRU, bpage);

ut_a(buf_page_in_file(bpage));

if (buf_page_get_state(bpage) != BUF_BLOCK_FILE_PAGE
|| bpage->id.space() != id
|| bpage->io_fix != BUF_IO_NONE) {
/* Compressed pages are never hashed.
Skip blocks of other tablespaces.
Skip I/O-fixed blocks (to be dealt with later). */
next_page:
bpage = prev_bpage;
continue;
}

buf_block_t* block = reinterpret_cast<buf_block_t*>(bpage);

mutex_enter(&block->mutex);

/* This debug check uses a dirty read that could
theoretically cause false positives while
buf_pool_clear_hash_index() is executing.
(Other conflicting access paths to the adaptive hash
index should not be possible, because when a
tablespace is being discarded or dropped, there must
be no concurrent access to the contained tables.) */
assert_block_ahi_valid(block);

bool skip = bpage->buf_fix_count > 0 || !block->index;

mutex_exit(&block->mutex);

if (skip) {
/* Skip this block, because there are
no adaptive hash index entries
pointing to it, or because we cannot
drop them due to the buffer-fix. */
goto next_page;
}

/* Store the page number so that we can drop the hash
index in a batch later. */
page_arr[num_entries] = bpage->id.page_no();
ut_a(num_entries < BUF_LRU_DROP_SEARCH_SIZE);
++num_entries;

if (num_entries < BUF_LRU_DROP_SEARCH_SIZE) {
goto next_page;
}

/* Array full. We release the buf_pool->mutex to obey
the latching order. */
buf_pool_mutex_exit(buf_pool);

buf_LRU_drop_page_hash_batch(id, page_arr, num_entries);

num_entries = 0;

buf_pool_mutex_enter(buf_pool);

/* Note that we released the buf_pool mutex above
after reading the prev_bpage during processing of a
page_hash_batch (i.e.: when the array was full).
Because prev_bpage could belong to a compressed-only
block, it may have been relocated, and thus the
pointer cannot be trusted. Because bpage is of type
buf_block_t, it is safe to dereference.

bpage can change in the LRU list. This is OK because
this function is a 'best effort' to drop as many
search hash entries as possible and it does not
guarantee that ALL such entries will be dropped. */

/* If, however, bpage has been removed from LRU list
to the free list then we should restart the scan.
bpage->state is protected by buf_pool mutex. */
if (bpage != NULL
&& buf_page_get_state(bpage) != BUF_BLOCK_FILE_PAGE) {

goto scan_again;
}
}

buf_pool_mutex_exit(buf_pool);

/* Drop any remaining batch of search hashed pages. */
buf_LRU_drop_page_hash_batch(id, page_arr, num_entries);
ut_free(page_arr);
}

/** Try to drop the adaptive hash index for a tablespace.
@param[in,out] table table
@return whether anything was dropped */
bool buf_LRU_drop_page_hash_for_tablespace(dict_table_t* table)
{
for (dict_index_t* index = dict_table_get_first_index(table);
index != NULL;
index = dict_table_get_next_index(index)) {
if (btr_search_info_get_ref_count(btr_search_get_info(index),
index)) {
goto drop_ahi;
}
}

return false;
drop_ahi:
ulint id = table->space_id;
for (ulint i = 0; i < srv_buf_pool_instances; i++) {
buf_LRU_drop_page_hash_for_tablespace(buf_pool_from_array(i),
id);
}

return true;
}

/******************************************************************//**
While flushing (or removing dirty) pages from a tablespace we don't
want to hog the CPU and resources. Release the buffer pool and block

@@ -1,7 +1,7 @@
/*****************************************************************************

Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2017, 2019, MariaDB Corporation.
Copyright (c) 2017, 2020, MariaDB Corporation.

This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -26,7 +26,9 @@ Created 1/8/1996 Heikki Tuuri

#include "dict0crea.h"
#include "btr0pcur.h"
#include "btr0btr.h"
#ifdef BTR_CUR_HASH_ADAPT
# include "btr0sea.h"
#endif /* BTR_CUR_HASH_ADAPT */
#include "page0page.h"
#include "mach0data.h"
#include "dict0boot.h"
@@ -1412,6 +1414,9 @@ dict_create_index_step(
&node->table->fts->cache->init_lock);
}

#ifdef BTR_CUR_HASH_ADAPT
ut_ad(!node->index->search_info->ref_count);
#endif /* BTR_CUR_HASH_ADAPT */
dict_index_remove_from_cache(node->table, node->index);
node->index = NULL;

@@ -1337,25 +1337,12 @@ dict_table_can_be_evicted(
}

#ifdef BTR_CUR_HASH_ADAPT
/* We cannot really evict the table if adaptive hash
index entries are pointing to any of its indexes. */
for (dict_index_t* index = dict_table_get_first_index(table);
index != NULL;
index = dict_table_get_next_index(index)) {

btr_search_t* info = btr_search_get_info(index);

/* We are not allowed to free the in-memory index
struct dict_index_t until all entries in the adaptive
hash index that point to any of the page belonging to
his b-tree index are dropped. This is so because
dropping of these entries require access to
dict_index_t struct. To avoid such scenario we keep
a count of number of such pages in the search_info and
only free the dict_index_t struct when this count
drops to zero.

See also: dict_index_remove_from_cache_low() */

if (btr_search_info_get_ref_count(info, index) > 0) {
if (index->n_ahi_pages()) {
return(FALSE);
}
}
@@ -1367,6 +1354,71 @@ dict_table_can_be_evicted(
return(FALSE);
}

#ifdef BTR_CUR_HASH_ADAPT
/** @return a clone of this */
dict_index_t *dict_index_t::clone() const
{
ut_ad(n_fields);
ut_ad(!(type & (DICT_IBUF | DICT_SPATIAL | DICT_FTS)));
ut_ad(online_status == ONLINE_INDEX_COMPLETE);
ut_ad(is_committed());
ut_ad(!is_dummy);
ut_ad(!parser);
ut_ad(!index_fts_syncing);
ut_ad(!online_log);
ut_ad(!rtr_track);

const size_t size= sizeof *this + n_fields * sizeof(*fields) +
#ifdef BTR_CUR_ADAPT
sizeof *search_info +
#endif
1 + strlen(name) +
n_uniq * (sizeof *stat_n_diff_key_vals +
sizeof *stat_n_sample_sizes +
sizeof *stat_n_non_null_key_vals);

mem_heap_t* heap= mem_heap_create(size);
dict_index_t *index= static_cast<dict_index_t*>(mem_heap_dup(heap, this,
sizeof *this));
*index= *this;
rw_lock_create(index_tree_rw_lock_key, &index->lock, SYNC_INDEX_TREE);
index->heap= heap;
index->name= mem_heap_strdup(heap, name);
index->fields= static_cast<dict_field_t*>
(mem_heap_dup(heap, fields, n_fields * sizeof *fields));
#ifdef BTR_CUR_ADAPT
index->search_info= btr_search_info_create(index->heap);
#endif /* BTR_CUR_ADAPT */
index->stat_n_diff_key_vals= static_cast<ib_uint64_t*>
(mem_heap_zalloc(heap, n_uniq * sizeof *stat_n_diff_key_vals));
index->stat_n_sample_sizes= static_cast<ib_uint64_t*>
(mem_heap_zalloc(heap, n_uniq * sizeof *stat_n_sample_sizes));
index->stat_n_non_null_key_vals= static_cast<ib_uint64_t*>
(mem_heap_zalloc(heap, n_uniq * sizeof *stat_n_non_null_key_vals));
memset(&index->zip_pad, 0, sizeof index->zip_pad);
return index;
}

/** Clone this index for lazy dropping of the adaptive hash.
@return this or a clone */
dict_index_t *dict_index_t::clone_if_needed()
{
if (!search_info->ref_count)
return this;
dict_index_t *prev= UT_LIST_GET_PREV(indexes, this);

UT_LIST_REMOVE(table->indexes, this);
UT_LIST_ADD_LAST(table->freed_indexes, this);
dict_index_t *index= clone();
set_freed();
if (prev)
UT_LIST_INSERT_AFTER(table->indexes, prev, index);
else
UT_LIST_ADD_FIRST(table->indexes, index);
return index;
}
#endif /* BTR_CUR_HASH_ADAPT */

/**********************************************************************//**
Make room in the table cache by evicting an unused table. The unused table
should not be part of FK relationship and currently not used in any user
@@ -2034,6 +2086,14 @@ dict_table_remove_from_cache_low(
UT_DELETE(table->vc_templ);
}

#ifdef BTR_CUR_HASH_ADAPT
if (UNIV_UNLIKELY(UT_LIST_GET_LEN(table->freed_indexes) != 0)) {
table->vc_templ = NULL;
table->id = 0;
return;
}
#endif /* BTR_CUR_HASH_ADAPT */

dict_mem_table_free(table);
}

@@ -2258,6 +2318,8 @@ dict_index_remove_from_cache_low(
ut_ad(table->magic_n == DICT_TABLE_MAGIC_N);
ut_ad(index->magic_n == DICT_INDEX_MAGIC_N);
ut_ad(mutex_own(&dict_sys->mutex));
ut_ad(table->id);
ut_ad(!index->freed());

/* No need to acquire the dict_index_t::lock here because
there can't be any active operations on this index (or table). */
@@ -2267,13 +2329,22 @@ dict_index_remove_from_cache_low(
row_log_free(index->online_log);
}

/* Remove the index from the list of indexes of the table */
UT_LIST_REMOVE(table->indexes, index);

/* The index is being dropped, remove any compression stats for it. */
if (!lru_evict && DICT_TF_GET_ZIP_SSIZE(index->table->flags)) {
mutex_enter(&page_zip_stat_per_index_mutex);
page_zip_stat_per_index.erase(index->id);
mutex_exit(&page_zip_stat_per_index_mutex);
}

/* Remove the index from affected virtual column index list */
index->detach_columns();

#ifdef BTR_CUR_HASH_ADAPT
/* We always create search info whether or not adaptive
hash index is enabled or not. */
btr_search_t* info = btr_search_get_info(index);
ulint retries = 0;
ut_ad(info);

/* We are not allowed to free the in-memory index struct
dict_index_t until all entries in the adaptive hash index
that point to any of the page belonging to his b-tree index
@@ -2283,31 +2354,15 @@ dict_index_remove_from_cache_low(
only free the dict_index_t struct when this count drops to
zero. See also: dict_table_can_be_evicted() */

do {
if (!btr_search_info_get_ref_count(info, index)
|| !buf_LRU_drop_page_hash_for_tablespace(table)) {
break;
}

ut_a(++retries < 10000);
} while (srv_shutdown_state == SRV_SHUTDOWN_NONE || !lru_evict);
if (index->n_ahi_pages()) {
index->set_freed();
UT_LIST_ADD_LAST(table->freed_indexes, index);
return;
}
#endif /* BTR_CUR_HASH_ADAPT */

rw_lock_free(&index->lock);

/* The index is being dropped, remove any compression stats for it. */
if (!lru_evict && DICT_TF_GET_ZIP_SSIZE(index->table->flags)) {
mutex_enter(&page_zip_stat_per_index_mutex);
page_zip_stat_per_index.erase(index->id);
mutex_exit(&page_zip_stat_per_index_mutex);
}

/* Remove the index from the list of indexes of the table */
UT_LIST_REMOVE(table->indexes, index);

/* Remove the index from affected virtual column index list */
index->detach_columns();

dict_mem_index_free(index);
}

@@ -148,6 +148,9 @@ dict_mem_table_create(
lock_table_lock_list_init(&table->locks);

UT_LIST_INIT(table->indexes, &dict_index_t::indexes);
#ifdef BTR_CUR_HASH_ADAPT
UT_LIST_INIT(table->freed_indexes, &dict_index_t::indexes);
#endif /* BTR_CUR_HASH_ADAPT */

table->heap = heap;

@@ -204,6 +207,10 @@ dict_mem_table_free(
{
ut_ad(table);
ut_ad(table->magic_n == DICT_TABLE_MAGIC_N);
ut_ad(UT_LIST_GET_LEN(table->indexes) == 0);
#ifdef BTR_CUR_HASH_ADAPT
ut_ad(UT_LIST_GET_LEN(table->freed_indexes) == 0);
#endif /* BTR_CUR_HASH_ADAPT */
ut_d(table->cached = FALSE);

if (dict_table_has_fts_index(table)

@@ -424,6 +424,9 @@ dict_stats_table_clone_create(
dict_table_stats_latch_create(t, false);

UT_LIST_INIT(t->indexes, &dict_index_t::indexes);
#ifdef BTR_CUR_HASH_ADAPT
UT_LIST_INIT(t->freed_indexes, &dict_index_t::indexes);
#endif /* BTR_CUR_HASH_ADAPT */

for (index = dict_table_get_first_index(table);
index != NULL;
@@ -4001,6 +4004,9 @@ test_dict_stats_save()
table.stat_clustered_index_size = TEST_CLUSTERED_INDEX_SIZE;
table.stat_sum_of_other_index_sizes = TEST_SUM_OF_OTHER_INDEX_SIZES;
UT_LIST_INIT(table.indexes, &dict_index_t::indexes);
#ifdef BTR_CUR_HASH_ADAPT
UT_LIST_INIT(table.freed_indexes, &dict_index_t::indexes);
#endif /* BTR_CUR_HASH_ADAPT */
UT_LIST_ADD_LAST(table.indexes, &index1);
UT_LIST_ADD_LAST(table.indexes, &index2);
ut_d(table.magic_n = DICT_TABLE_MAGIC_N);
@@ -4150,6 +4156,9 @@ test_dict_stats_fetch_from_ps()
/* craft a dummy dict_table_t */
table.name.m_name = (char*) (TEST_DATABASE_NAME "/" TEST_TABLE_NAME);
UT_LIST_INIT(table.indexes, &dict_index_t::indexes);
#ifdef BTR_CUR_HASH_ADAPT
UT_LIST_INIT(table.freed_indexes, &dict_index_t::indexes);
#endif /* BTR_CUR_HASH_ADAPT */
UT_LIST_ADD_LAST(table.indexes, &index1);
UT_LIST_ADD_LAST(table.indexes, &index2);
ut_d(table.magic_n = DICT_TABLE_MAGIC_N);

@@ -2794,8 +2794,6 @@ fseg_mark_page_used(
@param[in,out] space tablespace
@param[in] offset page number
@param[in] page_size page size
@param[in] ahi whether we may need to drop the adaptive
hash index
@param[in,out] mtr mini-transaction */
static
void
@@ -2804,9 +2802,6 @@ fseg_free_page_low(
fil_space_t* space,
page_no_t offset,
const page_size_t& page_size,
#ifdef BTR_CUR_HASH_ADAPT
bool ahi,
#endif /* BTR_CUR_HASH_ADAPT */
mtr_t* mtr)
{
xdes_t* descr;
@@ -2821,15 +2816,6 @@ fseg_free_page_low(
== FSEG_MAGIC_N_VALUE);
ut_ad(!((page_offset(seg_inode) - FSEG_ARR_OFFSET) % FSEG_INODE_SIZE));
ut_d(space->modify_check(*mtr));
#ifdef BTR_CUR_HASH_ADAPT
/* Drop search system page hash index if the page is found in
the pool and is hashed */

if (ahi) {
btr_search_drop_page_hash_when_freed(
page_id_t(space->id, offset));
}
#endif /* BTR_CUR_HASH_ADAPT */

descr = xdes_get_descriptor(space, offset, page_size, mtr);

@@ -2915,26 +2901,16 @@ fseg_free_page_low(
}
}

#ifndef BTR_CUR_HASH_ADAPT
# define fseg_free_page_low(inode, space, offset, page_size, ahi, mtr) \
fseg_free_page_low(inode, space, offset, page_size, mtr)
#endif /* !BTR_CUR_HASH_ADAPT */

/** Free a page in a file segment.
@param[in,out] seg_header file segment header
@param[in,out] space tablespace
@param[in] offset page number
@param[in] ahi whether we may need to drop the adaptive
hash index
@param[in,out] mtr mini-transaction */
void
fseg_free_page_func(
fseg_free_page(
fseg_header_t* seg_header,
fil_space_t* space,
ulint offset,
#ifdef BTR_CUR_HASH_ADAPT
bool ahi,
#endif /* BTR_CUR_HASH_ADAPT */
mtr_t* mtr)
{
DBUG_ENTER("fseg_free_page");
@@ -2950,7 +2926,7 @@ fseg_free_page_func(
&iblock);
fil_block_check_type(*iblock, FIL_PAGE_INODE, mtr);

fseg_free_page_low(seg_inode, space, offset, page_size, ahi, mtr);
fseg_free_page_low(seg_inode, space, offset, page_size, mtr);

ut_d(buf_page_set_file_page_was_freed(page_id_t(space->id, offset)));

@@ -2991,8 +2967,6 @@ fseg_page_is_free(fil_space_t* space, unsigned page)
@param[in,out] space tablespace
@param[in] page_size page size
@param[in] page page number in the extent
@param[in] ahi whether we may need to drop
the adaptive hash index
@param[in,out] mtr mini-transaction */
MY_ATTRIBUTE((nonnull))
static
@@ -3002,9 +2976,6 @@ fseg_free_extent(
fil_space_t* space,
const page_size_t& page_size,
ulint page,
#ifdef BTR_CUR_HASH_ADAPT
bool ahi,
#endif /* BTR_CUR_HASH_ADAPT */
mtr_t* mtr)
{
xdes_t* descr;
@@ -3025,23 +2996,6 @@ fseg_free_extent(
const ulint first_page_in_extent = page - (page % FSP_EXTENT_SIZE);
#endif /* BTR_CUR_HASH_ADAPT || UNIV_DEBUG */

#ifdef BTR_CUR_HASH_ADAPT
if (ahi) {
for (ulint i = 0; i < FSP_EXTENT_SIZE; i++) {
if (!xdes_mtr_get_bit(descr, XDES_FREE_BIT, i, mtr)) {

/* Drop search system page hash index
if the page is found in the pool and
is hashed */

btr_search_drop_page_hash_when_freed(
page_id_t(space->id,
first_page_in_extent + i));
}
}
}
#endif /* BTR_CUR_HASH_ADAPT */

if (xdes_is_full(descr, mtr)) {
flst_remove(seg_inode + FSEG_FULL,
descr + XDES_FLST_NODE, mtr);
@@ -3073,27 +3027,18 @@ fseg_free_extent(
#endif /* UNIV_DEBUG */
}

#ifndef BTR_CUR_HASH_ADAPT
# define fseg_free_extent(inode, space, page_size, page, ahi, mtr) \
fseg_free_extent(inode, space, page_size, page, mtr)
#endif /* !BTR_CUR_HASH_ADAPT */

/**********************************************************************//**
Frees part of a segment. This function can be used to free a segment by
repeatedly calling this function in different mini-transactions. Doing
the freeing in a single mini-transaction might result in too big a
mini-transaction.
@return TRUE if freeing completed */
ibool
fseg_free_step_func(
@return whether the freeing was completed */
bool
fseg_free_step(
fseg_header_t* header, /*!< in, own: segment header; NOTE: if the header
resides on the first page of the frag list
of the segment, this pointer becomes obsolete
after the last freeing step */
#ifdef BTR_CUR_HASH_ADAPT
bool ahi, /*!< in: whether we may need to drop
the adaptive hash index */
#endif /* BTR_CUR_HASH_ADAPT */
mtr_t* mtr) /*!< in/out: mini-transaction */
{
ulint n;
@@ -3125,7 +3070,7 @@ fseg_free_step_func(
if (inode == NULL) {
ib::info() << "Double free of inode from "
<< page_id_t(space_id, header_page);
DBUG_RETURN(TRUE);
DBUG_RETURN(true);
}

fil_block_check_type(*iblock, FIL_PAGE_INODE, mtr);
@@ -3135,9 +3080,9 @@ fseg_free_step_func(
/* Free the extent held by the segment */
page = xdes_get_offset(descr);

fseg_free_extent(inode, space, page_size, page, ahi, mtr);
fseg_free_extent(inode, space, page_size, page, mtr);

DBUG_RETURN(FALSE);
DBUG_RETURN(false);
}

/* Free a frag page */
@@ -3147,13 +3092,13 @@ fseg_free_step_func(
/* Freeing completed: free the segment inode */
fsp_free_seg_inode(space, page_size, inode, mtr);

DBUG_RETURN(TRUE);
DBUG_RETURN(true);
}

fseg_free_page_low(
inode, space,
fseg_get_nth_frag_page_no(inode, n, mtr),
page_size, ahi, mtr);
page_size, mtr);

n = fseg_find_last_used_frag_page_slot(inode, mtr);

@@ -3161,24 +3106,20 @@ fseg_free_step_func(
/* Freeing completed: free the segment inode */
fsp_free_seg_inode(space, page_size, inode, mtr);

DBUG_RETURN(TRUE);
DBUG_RETURN(true);
}

DBUG_RETURN(FALSE);
DBUG_RETURN(false);
}

/**********************************************************************//**
Frees part of a segment. Differs from fseg_free_step because this function
leaves the header page unfreed.
@return TRUE if freeing completed, except the header page */
ibool
fseg_free_step_not_header_func(
@return whether the freeing was completed, except for the header page */
bool
fseg_free_step_not_header(
fseg_header_t* header, /*!< in: segment header which must reside on
the first fragment page of the segment */
#ifdef BTR_CUR_HASH_ADAPT
bool ahi, /*!< in: whether we may need to drop
the adaptive hash index */
#endif /* BTR_CUR_HASH_ADAPT */
mtr_t* mtr) /*!< in/out: mini-transaction */
{
ulint n;
@@ -3204,29 +3145,27 @@ fseg_free_step_not_header_func(
/* Free the extent held by the segment */
page = xdes_get_offset(descr);

fseg_free_extent(inode, space, page_size, page, ahi, mtr);
fseg_free_extent(inode, space, page_size, page, mtr);

return(FALSE);
return false;
}

/* Free a frag page */

n = fseg_find_last_used_frag_page_slot(inode, mtr);

if (n == ULINT_UNDEFINED) {
ut_error;
}
ut_a(n != ULINT_UNDEFINED);

page_no = fseg_get_nth_frag_page_no(inode, n, mtr);

if (page_no == page_get_page_no(page_align(header))) {

return(TRUE);
return true;
}

fseg_free_page_low(inode, space, page_no, page_size, ahi, mtr);
fseg_free_page_low(inode, space, page_no, page_size, mtr);

return(FALSE);
return false;
}

/** Returns the first extent descriptor for a segment.

@@ -328,6 +328,7 @@ struct ha_innobase_inplace_ctx : public inplace_alter_handler_ctx
for (ulint i = 0; i < num_to_add_index; i++) {
if (!add_index[i]->is_committed()) {
add_index[i]->detach_columns();
add_index[i]->n_fields = 0;
}
}
}
@@ -9842,21 +9843,14 @@ foreign_fail:
}

if (ctx0->num_to_drop_vcol || ctx0->num_to_add_vcol) {
/* FIXME: this workaround does not seem to work with
partitioned tables */
DBUG_ASSERT(ctx0->old_table->get_ref_count() == 1);

trx_commit_for_mysql(m_prebuilt->trx);
#ifdef BTR_CUR_HASH_ADAPT
if (btr_search_enabled) {
btr_search_disable(false);
btr_search_enable();
}
#endif /* BTR_CUR_HASH_ADAPT */

char tb_name[FN_REFLEN];
ut_strcpy(tb_name, m_prebuilt->table->name.m_name);

tb_name[strlen(m_prebuilt->table->name.m_name)] = 0;

char tb_name[NAME_LEN * 2 + 1 + 1];
strcpy(tb_name, m_prebuilt->table->name.m_name);
dict_table_close(m_prebuilt->table, true, false);
dict_table_remove_from_cache(m_prebuilt->table);
m_prebuilt->table = dict_table_open_on_name(

@@ -2100,7 +2100,7 @@ ibuf_remove_free_page(void)

compile_time_assert(IBUF_SPACE_ID == 0);
fseg_free_page(header_page + IBUF_HEADER + IBUF_TREE_SEG_HEADER,
fil_system.sys_space, page_no, false, &mtr);
fil_system.sys_space, page_no, &mtr);

const page_id_t page_id(IBUF_SPACE_ID, page_no);

@@ -1,7 +1,7 @@
/*****************************************************************************

Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2017, 2019, MariaDB Corporation.
Copyright (c) 2017, 2020, MariaDB Corporation.

This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software

@@ -1,7 +1,7 @@
/*****************************************************************************

Copyright (c) 1996, 2015, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2015, 2019, MariaDB Corporation.
Copyright (c) 2015, 2020, MariaDB Corporation.

This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software

@@ -1,7 +1,7 @@
/*****************************************************************************

Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2017, 2018, MariaDB Corporation.
Copyright (c) 2017, 2020, MariaDB Corporation.

This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -48,15 +48,6 @@ void btr_search_disable(bool need_mutex);
/** Enable the adaptive hash search system. */
void btr_search_enable();

/** Returns the value of ref_count. The value is protected by latch.
@param[in]	info		search info
@param[in]	index		index identifier
@return ref_count value. */
ulint
btr_search_info_get_ref_count(
btr_search_t*	info,
dict_index_t*	index);

/*********************************************************************//**
Updates the search info. */
UNIV_INLINE
@@ -272,6 +263,18 @@ struct btr_search_t{
};

#ifdef BTR_CUR_HASH_ADAPT
/** @return number of leaf pages pointed to by the adaptive hash index */
inline ulint dict_index_t::n_ahi_pages() const
{
if (!btr_search_enabled)
return 0;
rw_lock_t *latch = btr_get_search_latch(this);
rw_lock_s_lock(latch);
ulint ref_count= search_info->ref_count;
rw_lock_s_unlock(latch);
return ref_count;
}

/** The hash index system */
struct btr_search_sys_t{
hash_table_t**	hash_tables;	/*!< the adaptive hash tables,

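/* Editorial sketch, not part of the diff: the hunk above removes
   btr_search_info_get_ref_count() and adds dict_index_t::n_ahi_pages(),
   which reads the same search_info->ref_count under the AHI latch.
   A hypothetical caller (index being any dict_index_t*) would now write: */
if (index->n_ahi_pages() == 0) {
	/* no adaptive hash index entries point to this index's leaf pages */
}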
@@ -1,7 +1,7 @@
/*****************************************************************************

Copyright (c) 1996, 2015, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2018, MariaDB Corporation.
Copyright (c) 2018, 2020, MariaDB Corporation.

This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software

@@ -71,11 +71,13 @@ struct fil_addr_t;
/* @} */
/** @name Modes for buf_page_get_known_nowait */
/* @{ */
#define BUF_MAKE_YOUNG	51		/*!< Move the block to the
#ifdef BTR_CUR_HASH_ADAPT
# define BUF_MAKE_YOUNG	51	/*!< Move the block to the
start of the LRU list if there
is a danger that the block
would drift out of the buffer
pool*/
#endif /* BTR_CUR_HASH_ADAPT */
#define BUF_KEEP_OLD	52 		/*!< Preserve the current LRU
position of the block. */
/* @} */
@@ -282,12 +284,6 @@ extern "C"
os_thread_ret_t
DECLARE_THREAD(buf_resize_thread)(void*);

#ifdef BTR_CUR_HASH_ADAPT
/** Clear the adaptive hash index on all pages in the buffer pool. */
void
buf_pool_clear_hash_index();
#endif /* BTR_CUR_HASH_ADAPT */

/*********************************************************************//**
Gets the current size of buffer buf_pool in bytes.
@return size in bytes */

@@ -50,17 +50,6 @@ These are low-level functions
/** Minimum LRU list length for which the LRU_old pointer is defined */
#define BUF_LRU_OLD_MIN_LEN	512	/* 8 megabytes of 16k pages */

#ifdef BTR_CUR_HASH_ADAPT
struct dict_table_t;
/** Try to drop the adaptive hash index for a tablespace.
@param[in,out]	table	table
@return whether anything was dropped */
bool buf_LRU_drop_page_hash_for_tablespace(dict_table_t* table)
MY_ATTRIBUTE((warn_unused_result,nonnull));
#else
# define buf_LRU_drop_page_hash_for_tablespace(table)
#endif /* BTR_CUR_HASH_ADAPT */

/** Empty the flush list for all pages belonging to a tablespace.
@param[in]	id		tablespace identifier
@param[in,out]	observer	flush observer,

@@ -839,7 +839,10 @@ struct dict_index_t{
mem_heap_t*	heap;	/*!< memory heap */
id_name_t	name;	/*!< index name */
dict_table_t*	table;	/*!< back pointer to table */
unsigned	page:32;/*!< index tree root page number */
/** root page number, or FIL_NULL if the index has been detached
from storage (DISCARD TABLESPACE or similar),
or 1 if the index is in table->freed_indexes */
unsigned	page:32;
unsigned	merge_threshold:6;
/*!< In the pessimistic delete, if the page
data size drops below this limit in percent,
@@ -1050,8 +1053,6 @@ struct dict_index_t{
for (unsigned i = 0; i < n_fields; i++) {
fields[i].col->detach(*this);
}

n_fields = 0;
}
}

@@ -1118,6 +1119,20 @@ struct dict_index_t{
bool
vers_history_row(const rec_t* rec, bool &history_row);

#ifdef BTR_CUR_HASH_ADAPT
/** @return a clone of this */
dict_index_t* clone() const;
/** Clone this index for lazy dropping of the adaptive hash index.
@return this or a clone */
dict_index_t* clone_if_needed();
/** @return number of leaf pages pointed to by the adaptive hash index */
inline ulint n_ahi_pages() const;
/** @return whether mark_freed() had been invoked */
bool freed() const { return UNIV_UNLIKELY(page == 1); }
/** Note that the index is waiting for btr_search_lazy_free() */
void set_freed() { ut_ad(!freed()); page= 1; }
#endif /* BTR_CUR_HASH_ADAPT */

/** This ad-hoc class is used by record_size_info only.	*/
class record_size_info_t {
public:
@@ -1768,6 +1783,11 @@ struct dict_table_t {

/** List of indexes of the table. */
UT_LIST_BASE_NODE_T(dict_index_t)	indexes;
#ifdef BTR_CUR_HASH_ADAPT
/** List of detached indexes that are waiting to be freed along with
the last adaptive hash index entry */
UT_LIST_BASE_NODE_T(dict_index_t)	freed_indexes;
#endif /* BTR_CUR_HASH_ADAPT */

/** List of foreign key constraints in the table. These refer to
columns in other tables. */

@@ -1,7 +1,7 @@
/*****************************************************************************

Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2013, 2019, MariaDB Corporation.
Copyright (c) 2013, 2020, MariaDB Corporation.

This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -512,25 +512,13 @@ fsp_reserve_free_extents(
@param[in,out]	seg_header	file segment header
@param[in,out]	space		tablespace
@param[in]	offset		page number
@param[in]	ahi		whether we may need to drop the adaptive
hash index
@param[in,out]	mtr		mini-transaction */
void
fseg_free_page_func(
fseg_free_page(
fseg_header_t*	seg_header,
fil_space_t*	space,
ulint		offset,
#ifdef BTR_CUR_HASH_ADAPT
bool		ahi,
#endif /* BTR_CUR_HASH_ADAPT */
mtr_t*		mtr);
#ifdef BTR_CUR_HASH_ADAPT
# define fseg_free_page(header, space, offset, ahi, mtr)	\
fseg_free_page_func(header, space, offset, ahi, mtr)
#else /* BTR_CUR_HASH_ADAPT */
# define fseg_free_page(header, space, offset, ahi, mtr)	\
fseg_free_page_func(header, space, offset, mtr)
#endif /* BTR_CUR_HASH_ADAPT */
/** Determine whether a page is free.
@param[in,out]	space	tablespace
@param[in]	page	page number
@@ -543,45 +531,25 @@ Frees part of a segment. This function can be used to free a segment
by repeatedly calling this function in different mini-transactions.
Doing the freeing in a single mini-transaction might result in
too big a mini-transaction.
@return TRUE if freeing completed */
ibool
fseg_free_step_func(
@return whether the freeing was completed */
bool
fseg_free_step(
fseg_header_t*	header,	/*!< in, own: segment header; NOTE: if the header
resides on the first page of the frag list
of the segment, this pointer becomes obsolete
after the last freeing step */
#ifdef BTR_CUR_HASH_ADAPT
bool		ahi,	/*!< in: whether we may need to drop
the adaptive hash index */
#endif /* BTR_CUR_HASH_ADAPT */
mtr_t*		mtr)	/*!< in/out: mini-transaction */
MY_ATTRIBUTE((warn_unused_result));
#ifdef BTR_CUR_HASH_ADAPT
# define fseg_free_step(header, ahi, mtr) fseg_free_step_func(header, ahi, mtr)
#else /* BTR_CUR_HASH_ADAPT */
# define fseg_free_step(header, ahi, mtr) fseg_free_step_func(header, mtr)
#endif /* BTR_CUR_HASH_ADAPT */
/**********************************************************************//**
Frees part of a segment. Differs from fseg_free_step because this function
leaves the header page unfreed.
@return TRUE if freeing completed, except the header page */
ibool
fseg_free_step_not_header_func(
@return whether the freeing was completed, except for the header page */
bool
fseg_free_step_not_header(
fseg_header_t*	header,	/*!< in: segment header which must reside on
the first fragment page of the segment */
#ifdef BTR_CUR_HASH_ADAPT
bool		ahi,	/*!< in: whether we may need to drop
the adaptive hash index */
#endif /* BTR_CUR_HASH_ADAPT */
mtr_t*		mtr)	/*!< in/out: mini-transaction */
MY_ATTRIBUTE((warn_unused_result));
#ifdef BTR_CUR_HASH_ADAPT
# define fseg_free_step_not_header(header, ahi, mtr)	\
fseg_free_step_not_header_func(header, ahi, mtr)
#else /* BTR_CUR_HASH_ADAPT */
# define fseg_free_step_not_header(header, ahi, mtr)	\
fseg_free_step_not_header_func(header, mtr)
#endif /* BTR_CUR_HASH_ADAPT */

/** Reset the page type.
Data files created before MySQL 5.1.48 may contain garbage in FIL_PAGE_TYPE.

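/* Editorial sketch, not part of the diff: with the ahi flag and the
   fseg_free_page()/fseg_free_step() wrapper macros removed above, call
   sites simply drop one argument, mirroring the trx0undo.cc hunk further
   below: */
finished = fseg_free_step(file_seg, &mtr);	/* was: fseg_free_step(file_seg, false, &mtr) */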
@@ -26,6 +26,9 @@ Created 2012-02-08 by Sunny Bains.

#include "row0import.h"
#include "btr0pcur.h"
#ifdef BTR_CUR_HASH_ADAPT
# include "btr0sea.h"
#endif
#include "que0que.h"
#include "dict0boot.h"
#include "dict0load.h"
@@ -4013,15 +4016,12 @@ row_import_for_mysql(
index entries that point to cached garbage pages in the buffer
pool, because PageConverter::operator() only evicted those
pages that were replaced by the imported pages. We must
discard all remaining adaptive hash index entries, because the
detach any remaining adaptive hash index entries, because the
adaptive hash index must be a subset of the table contents;
false positives are not tolerated. */
while (buf_LRU_drop_page_hash_for_tablespace(table)) {
if (trx_is_interrupted(trx)
|| srv_shutdown_state != SRV_SHUTDOWN_NONE) {
err = DB_INTERRUPTED;
break;
}
for (dict_index_t* index = UT_LIST_GET_FIRST(table->indexes); index;
index = UT_LIST_GET_NEXT(indexes, index)) {
index = index->clone_if_needed();
}
#endif /* BTR_CUR_HASH_ADAPT */

@@ -46,6 +46,9 @@ Completed by Sunny Bains and Marko Makela
#include "row0vers.h"
#include "handler0alter.h"
#include "btr0bulk.h"
#ifdef BTR_CUR_ADAPT
# include "btr0sea.h"
#endif /* BTR_CUR_ADAPT */
#include "ut0stage.h"
#include "fil0crypt.h"

@@ -203,7 +206,6 @@ public:
&ins_cur, 0,
__FILE__, __LINE__, &mtr);

error = btr_cur_pessimistic_insert(
flag, &ins_cur, &ins_offsets,
&row_heap, dtuple, &rec,
@@ -3912,6 +3914,9 @@ row_merge_drop_indexes(
we should exclude FTS entries from
prebuilt->ins_node->entry_list
in ins_node_create_entry_list(). */
#ifdef BTR_CUR_HASH_ADAPT
ut_ad(!index->search_info->ref_count);
#endif /* BTR_CUR_HASH_ADAPT */
dict_index_remove_from_cache(
table, index);
index = prev;

@@ -2587,6 +2587,9 @@ row_create_index_for_mysql(
unsigned(index->n_nullable));

err = dict_create_index_tree_in_mem(index, trx);
#ifdef BTR_CUR_HASH_ADAPT
ut_ad(!index->search_info->ref_count);
#endif /* BTR_CUR_HASH_ADAPT */

if (err != DB_SUCCESS) {
dict_index_remove_from_cache(table, index);
@@ -3426,35 +3429,6 @@ row_drop_table_for_mysql(
ut_ad(!(table->stats_bg_flag & BG_STAT_IN_PROGRESS));
if (!table->no_rollback()) {
if (table->space != fil_system.sys_space) {
#ifdef BTR_CUR_HASH_ADAPT
/* On DISCARD TABLESPACE, we would not drop the
adaptive hash index entries. If the tablespace is
missing here, delete-marking the record in SYS_INDEXES
would not free any pages in the buffer pool. Thus,
dict_index_remove_from_cache() would hang due to
adaptive hash index entries existing in the buffer
pool. To prevent this hang, and also to guarantee
that btr_search_drop_page_hash_when_freed() will avoid
calling btr_search_drop_page_hash_index() while we
hold the InnoDB dictionary lock, we will drop any
adaptive hash index entries upfront. */
const bool immune = is_temp_name
|| create_failed
|| sqlcom == SQLCOM_CREATE_TABLE
|| strstr(table->name.m_name, "/FTS");

while (buf_LRU_drop_page_hash_for_tablespace(table)) {
if ((!immune && trx_is_interrupted(trx))
|| srv_shutdown_state
!= SRV_SHUTDOWN_NONE) {
err = DB_INTERRUPTED;
table->to_be_dropped = false;
dict_table_close(table, true, false);
goto funct_exit;
}
}
#endif /* BTR_CUR_HASH_ADAPT */

/* Delete the link file if used. */
if (DICT_TF_HAS_DATA_DIR(table->flags)) {
RemoteDatafile::delete_link_file(name);

@@ -2,7 +2,7 @@

Copyright (c) 1997, 2017, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2008, Google Inc.
Copyright (c) 2015, 2019, MariaDB Corporation.
Copyright (c) 2015, 2020, MariaDB Corporation.

Portions of this file contain modifications contributed and copyrighted by
Google, Inc. Those modifications are gratefully acknowledged and are described

@@ -1,7 +1,7 @@
/*****************************************************************************

Copyright (c) 2013, 2018, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2017, 2019, MariaDB Corporation.
Copyright (c) 2017, 2020, MariaDB Corporation.

This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software

@@ -1,7 +1,7 @@
/*****************************************************************************

Copyright (c) 1996, 2017, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2017, 2019, MariaDB Corporation.
Copyright (c) 2017, 2020, MariaDB Corporation.

This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -365,7 +365,7 @@ trx_purge_free_segment(trx_rseg_t* rseg, fil_addr_t hdr_addr)

while (!fseg_free_step_not_header(
TRX_UNDO_SEG_HDR + TRX_UNDO_FSEG_HEADER
+ undo_page, false, &mtr)) {
+ undo_page, &mtr)) {
mutex_exit(&rseg->mutex);

mtr.commit();
@@ -401,7 +401,7 @@ trx_purge_free_segment(trx_rseg_t* rseg, fil_addr_t hdr_addr)
fsp0fsp.cc. */

} while (!fseg_free_step(TRX_UNDO_SEG_HDR + TRX_UNDO_FSEG_HEADER
+ undo_page, false, &mtr));
+ undo_page, &mtr));

const ulint hist_size = mach_read_from_4(rseg_hdr
+ TRX_RSEG_HISTORY_SIZE);

@@ -1,7 +1,7 @@
/*****************************************************************************

Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2014, 2019, MariaDB Corporation.
Copyright (c) 2014, 2020, MariaDB Corporation.

This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -844,7 +844,7 @@ trx_undo_free_page(
TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_NODE + undo_page, mtr);

fseg_free_page(TRX_UNDO_SEG_HDR + TRX_UNDO_FSEG_HEADER + header_page,
rseg->space, page_no, false, mtr);
rseg->space, page_no, mtr);

const fil_addr_t last_addr = flst_get_last(
TRX_UNDO_SEG_HDR + TRX_UNDO_PAGE_LIST + header_page, mtr);
@@ -1045,7 +1045,7 @@ trx_undo_seg_free(

file_seg = seg_header + TRX_UNDO_FSEG_HEADER;

finished = fseg_free_step(file_seg, false, &mtr);
finished = fseg_free_step(file_seg, &mtr);

if (finished) {
/* Update the rseg header */