
Reduce buffer pool mutex contention under >= 4 big concurrent
CPU-bound SELECT queries. (Bug #15815)

Fix: replace the mutex by one mutex protecting the 'flush list'
(and the free list) and several mutexes protecting portions of the
buffer pool, where we keep several individual LRU lists of pages.

This patch is from Sunny Bains and Heikki Tuuri.
Author: marko
Date:   2006-11-08 12:49:15 +00:00
Parent: b4b9a10700
Commit: 0011a8a91a

5 changed files with 244 additions and 96 deletions
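
Before the per-file diff, a minimal sketch of the locking split this change introduces: per-block mutexes guard the fields that concurrent SELECTs touch most often (buf_fix_count, io_fix, accessed), while the pool-wide mutex is kept for list maintenance only. This is illustrative pthreads code, not InnoDB source; the names block_t, pool_t, block_fix(), block_unfix() and pool_make_young() are invented for the example.

/* Sketch only: a pool-wide mutex for list structure plus one mutex
   per block, so fix-count updates no longer serialize on the pool lock. */
#include <pthread.h>
#include <stdio.h>

typedef struct block {
	pthread_mutex_t	mutex;		/* protects buf_fix_count, io_fix, accessed */
	int		buf_fix_count;
	int		io_fix;
	int		accessed;
} block_t;

typedef struct pool {
	pthread_mutex_t	mutex;		/* protects LRU/free/flush list structure only */
	block_t		blocks[4];
} pool_t;

static pool_t	pool;

/* Pin a page: only the block's own mutex is needed to bump the fix count. */
static void block_fix(block_t* b)
{
	pthread_mutex_lock(&b->mutex);
	b->buf_fix_count++;
	b->accessed = 1;
	pthread_mutex_unlock(&b->mutex);
}

/* Unpin a page. */
static void block_unfix(block_t* b)
{
	pthread_mutex_lock(&b->mutex);
	b->buf_fix_count--;
	pthread_mutex_unlock(&b->mutex);
}

/* List reorganization still takes the pool-wide mutex, analogous to
   buf_block_make_young() after this patch. */
static void pool_make_young(pool_t* p, block_t* b)
{
	(void) b;
	pthread_mutex_lock(&p->mutex);
	/* move b to the head of the LRU list here */
	pthread_mutex_unlock(&p->mutex);
}

int main(void)
{
	pthread_mutex_init(&pool.mutex, NULL);
	for (int i = 0; i < 4; i++) {
		pthread_mutex_init(&pool.blocks[i].mutex, NULL);
	}

	block_fix(&pool.blocks[0]);
	pool_make_young(&pool, &pool.blocks[0]);
	block_unfix(&pool.blocks[0]);

	printf("fix count back to %d\n", pool.blocks[0].buf_fix_count);
	return 0;
}

The actual patch applies this split throughout buf0buf.c below: block->mutex is taken before reading or changing buf_fix_count and io_fix, and buf_pool->mutex is released earlier (or not taken at all) wherever only per-block state is involved.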


@@ -221,6 +221,9 @@ in the free list to the frames.
5) When we have AWE enabled, we disable adaptive hash indexes.
*/
/* Value in microseconds */
static const int WAIT_FOR_READ = 20000;
buf_pool_t* buf_pool = NULL; /* The buffer buf_pool of the database */
#ifdef UNIV_DEBUG
@@ -539,6 +542,8 @@ buf_block_init(
block->n_pointers = 0;
mutex_create(&block->mutex, SYNC_BUF_BLOCK);
rw_lock_create(&block->lock, SYNC_LEVEL_VARYING);
ut_ad(rw_lock_validate(&(block->lock)));
@@ -813,8 +818,15 @@ buf_awe_map_page_to_frame(
bck = UT_LIST_GET_LAST(buf_pool->awe_LRU_free_mapped);
while (bck) {
if (bck->state == BUF_BLOCK_FILE_PAGE
&& (bck->buf_fix_count != 0 || bck->io_fix != 0)) {
ibool skip;
mutex_enter(&bck->mutex);
skip = (bck->state == BUF_BLOCK_FILE_PAGE
&& (bck->buf_fix_count != 0 || bck->io_fix != 0));
if (skip) {
mutex_exit(&bck->mutex);
/* We have to skip this */
bck = UT_LIST_GET_PREV(awe_LRU_free_mapped, bck);
@@ -848,6 +860,8 @@ buf_awe_map_page_to_frame(
buf_pool->n_pages_awe_remapped++;
mutex_exit(&bck->mutex);
return;
}
}
@@ -886,13 +900,22 @@ buf_block_make_young(
/*=================*/
buf_block_t* block) /* in: block to make younger */
{
if (buf_pool->freed_page_clock >= block->freed_page_clock
+ 1 + (buf_pool->curr_size / 1024)) {
#ifdef UNIV_SYNC_DEBUG
ut_ad(!mutex_own(&(buf_pool->mutex)));
#endif /* UNIV_SYNC_DEBUG */
/* Note that we read freed_page_clock's without holding any mutex:
this is allowed since the result is used only in heuristics */
if (buf_pool->freed_page_clock >= block->freed_page_clock
+ 1 + (buf_pool->curr_size / 4)) {
mutex_enter(&buf_pool->mutex);
/* There has been freeing activity in the LRU list:
best to move to the head of the LRU list */
buf_LRU_make_block_young(block);
mutex_exit(&buf_pool->mutex);
}
}
@@ -927,12 +950,16 @@ buf_block_free(
/*===========*/
buf_block_t* block) /* in, own: block to be freed */
{
ut_a(block->state != BUF_BLOCK_FILE_PAGE);
mutex_enter(&(buf_pool->mutex));
mutex_enter(&block->mutex);
ut_a(block->state != BUF_BLOCK_FILE_PAGE);
buf_LRU_block_free_non_file_page(block);
mutex_exit(&block->mutex);
mutex_exit(&(buf_pool->mutex));
}
@@ -1151,9 +1178,8 @@ buf_page_get_gen(
#endif
buf_pool->n_page_gets++;
loop:
mutex_enter_fast(&(buf_pool->mutex));
block = NULL;
mutex_enter_fast(&(buf_pool->mutex));
if (guess) {
block = buf_block_align(guess);
@@ -1191,6 +1217,8 @@ loop:
goto loop;
}
mutex_enter(&block->mutex);
ut_a(block->state == BUF_BLOCK_FILE_PAGE);
must_read = FALSE;
@@ -1200,9 +1228,9 @@ loop:
must_read = TRUE;
if (mode == BUF_GET_IF_IN_POOL) {
/* The page is only being read to buffer */
mutex_exit(&(buf_pool->mutex));
mutex_exit(&buf_pool->mutex);
mutex_exit(&block->mutex);
return(NULL);
}
@@ -1226,7 +1254,7 @@ loop:
#else
buf_block_buf_fix_inc(block);
#endif
buf_block_make_young(block);
mutex_exit(&buf_pool->mutex);
/* Check if this is the first access to the page */
@@ -1234,10 +1262,13 @@ loop:
block->accessed = TRUE;
mutex_exit(&block->mutex);
buf_block_make_young(block);
#ifdef UNIV_DEBUG_FILE_ACCESSES
ut_a(block->file_page_was_freed == FALSE);
#endif
mutex_exit(&(buf_pool->mutex));
#ifdef UNIV_DEBUG
buf_dbg_counter++;
@@ -1262,13 +1293,14 @@ loop:
}
if (!success) {
mutex_enter(&(buf_pool->mutex));
mutex_enter(&block->mutex);
block->buf_fix_count--;
mutex_exit(&block->mutex);
#ifdef UNIV_SYNC_DEBUG
rw_lock_s_unlock(&(block->debug_latch));
#endif
mutex_exit(&(buf_pool->mutex));
return(NULL);
}
@@ -1279,18 +1311,16 @@ loop:
completes */
for (;;) {
mutex_enter(&(buf_pool->mutex));
mutex_enter(&block->mutex);
if (block->io_fix == BUF_IO_READ) {
mutex_exit(&(buf_pool->mutex));
mutex_exit(&block->mutex);
/* Sleep 20 milliseconds */
os_thread_sleep(20000);
os_thread_sleep(WAIT_FOR_READ);
} else {
mutex_exit(&(buf_pool->mutex));
mutex_exit(&block->mutex);
break;
}
@@ -1349,14 +1379,14 @@ buf_page_optimistic_get_func(
ut_ad(mtr && block);
ut_ad((rw_latch == RW_S_LATCH) || (rw_latch == RW_X_LATCH));
mutex_enter(&(buf_pool->mutex));
/* If AWE is used, block may have a different frame now, e.g., NULL */
mutex_enter(&block->mutex);
if (UNIV_UNLIKELY(block->state != BUF_BLOCK_FILE_PAGE)
|| UNIV_UNLIKELY(block->frame != guess)) {
exit_func:
mutex_exit(&(buf_pool->mutex));
mutex_exit(&block->mutex);
return(FALSE);
}
@@ -1366,16 +1396,15 @@ exit_func:
#else
buf_block_buf_fix_inc(block);
#endif
accessed = block->accessed;
block->accessed = TRUE;
mutex_exit(&block->mutex);
buf_block_make_young(block);
/* Check if this is the first access to the page */
accessed = block->accessed;
block->accessed = TRUE;
mutex_exit(&(buf_pool->mutex));
ut_ad(!ibuf_inside() || ibuf_page(block->space, block->offset));
if (rw_latch == RW_S_LATCH) {
@@ -1389,13 +1418,16 @@ exit_func:
}
if (UNIV_UNLIKELY(!success)) {
mutex_enter(&(buf_pool->mutex));
mutex_enter(&block->mutex);
block->buf_fix_count--;
mutex_exit(&block->mutex);
#ifdef UNIV_SYNC_DEBUG
rw_lock_s_unlock(&(block->debug_latch));
#endif
goto exit_func;
return(FALSE);
}
if (UNIV_UNLIKELY(!UT_DULINT_EQ(modify_clock, block->modify_clock))) {
@@ -1408,13 +1440,16 @@ exit_func:
rw_lock_x_unlock(&(block->lock));
}
mutex_enter(&(buf_pool->mutex));
mutex_enter(&block->mutex);
block->buf_fix_count--;
mutex_exit(&block->mutex);
#ifdef UNIV_SYNC_DEBUG
rw_lock_s_unlock(&(block->debug_latch));
#endif
goto exit_func;
return(FALSE);
}
mtr_memo_push(mtr, block, fix_type);
@@ -1471,10 +1506,10 @@ buf_page_get_known_nowait(
ut_ad(mtr);
ut_ad((rw_latch == RW_S_LATCH) || (rw_latch == RW_X_LATCH));
mutex_enter(&(buf_pool->mutex));
block = buf_block_align(guess);
mutex_enter(&block->mutex);
if (block->state == BUF_BLOCK_REMOVE_HASH) {
/* Another thread is just freeing the block from the LRU list
of the buffer pool: do not try to access this page; this
@@ -1483,7 +1518,7 @@ buf_page_get_known_nowait(
we have already removed it from the page address hash table
of the buffer pool. */
mutex_exit(&(buf_pool->mutex));
mutex_exit(&block->mutex);
return(FALSE);
}
@@ -1495,12 +1530,12 @@ buf_page_get_known_nowait(
#else
buf_block_buf_fix_inc(block);
#endif
mutex_exit(&block->mutex);
if (mode == BUF_MAKE_YOUNG) {
buf_block_make_young(block);
}
mutex_exit(&(buf_pool->mutex));
ut_ad(!ibuf_inside() || (mode == BUF_KEEP_OLD));
if (rw_latch == RW_S_LATCH) {
@@ -1514,13 +1549,15 @@ buf_page_get_known_nowait(
}
if (!success) {
mutex_enter(&(buf_pool->mutex));
mutex_enter(&block->mutex);
block->buf_fix_count--;
mutex_exit(&block->mutex);
#ifdef UNIV_SYNC_DEBUG
rw_lock_s_unlock(&(block->debug_latch));
#endif
mutex_exit(&(buf_pool->mutex));
return(FALSE);
}
@@ -1568,7 +1605,6 @@ buf_page_init_for_backup_restore(
block->offset = offset;
block->lock_hash_val = 0;
block->lock_mutex = NULL;
block->freed_page_clock = 0;
@@ -1601,6 +1637,7 @@ buf_page_init(
{
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(buf_pool->mutex)));
ut_ad(mutex_own(&(block->mutex)));
#endif /* UNIV_SYNC_DEBUG */
ut_a(block->state != BUF_BLOCK_FILE_PAGE);
@@ -1615,7 +1652,6 @@ buf_page_init(
block->index = NULL;
block->lock_hash_val = lock_rec_hash(space, offset);
block->lock_mutex = NULL;
/* Insert into the hash table of file pages */
@@ -1709,6 +1745,7 @@ buf_page_init_for_read(
ut_a(block);
mutex_enter(&(buf_pool->mutex));
mutex_enter(&block->mutex);
if (fil_tablespace_deleted_or_being_deleted_in_mem(
space, tablespace_version)) {
@@ -1722,7 +1759,9 @@ buf_page_init_for_read(
deleted or is being deleted, or the page is
already in buf_pool, return */
mutex_exit(&block->mutex);
mutex_exit(&(buf_pool->mutex));
buf_block_free(block);
if (mode == BUF_READ_IBUF_PAGES_ONLY) {
@@ -1742,6 +1781,7 @@ buf_page_init_for_read(
buf_LRU_add_block(block, TRUE); /* TRUE == to old blocks */
block->io_fix = BUF_IO_READ;
buf_pool->n_pend_reads++;
/* We set a pass-type x-lock on the frame because then the same
@@ -1753,6 +1793,7 @@ buf_page_init_for_read(
rw_lock_x_lock_gen(&(block->lock), BUF_IO_READ);
mutex_exit(&block->mutex);
mutex_exit(&(buf_pool->mutex));
if (mode == BUF_READ_IBUF_PAGES_ONLY) {
@@ -1817,6 +1858,8 @@ buf_page_create(
block = free_block;
mutex_enter(&block->mutex);
buf_page_init(space, offset, block);
/* The block must be put to the LRU list */
@@ -1827,13 +1870,15 @@ buf_page_create(
#else
buf_block_buf_fix_inc(block);
#endif
buf_pool->n_pages_created++;
mutex_exit(&(buf_pool->mutex));
mtr_memo_push(mtr, block, MTR_MEMO_BUF_FIX);
block->accessed = TRUE;
buf_pool->n_pages_created++;
mutex_exit(&(buf_pool->mutex));
mutex_exit(&block->mutex);
/* Delete possible entries for the page from the insert buffer:
such can exist if the page belonged to an index which was dropped */
@@ -1885,6 +1930,12 @@ buf_page_io_complete(
ut_a(block->state == BUF_BLOCK_FILE_PAGE);
/* We do not need protect block->io_fix here by block->mutex to read
it because this is the only function where we can change the value
from BUF_IO_READ or BUF_IO_WRITE to some other value, and our code
ensures that this is the only thread that handles the i/o for this
block. */
io_type = block->io_fix;
if (io_type == BUF_IO_READ) {
@@ -1986,11 +2037,12 @@ buf_page_io_complete(
}
}
mutex_enter(&(buf_pool->mutex));
mutex_enter(&block->mutex);
#ifdef UNIV_IBUF_DEBUG
ut_a(ibuf_count_get(block->space, block->offset) == 0);
#endif
mutex_enter(&(buf_pool->mutex));
/* Because this thread which does the unlocking is not the same that
did the locking, we use a pass value != 0 in unlock, which simply
removes the newest lock debug record, without checking the thread
@@ -2033,6 +2085,7 @@ buf_page_io_complete(
#endif /* UNIV_DEBUG */
}
mutex_exit(&block->mutex);
mutex_exit(&(buf_pool->mutex));
#ifdef UNIV_DEBUG
@@ -2095,6 +2148,8 @@ buf_validate(void)
block = buf_pool_get_nth_block(buf_pool, i);
mutex_enter(&block->mutex);
if (block->state == BUF_BLOCK_FILE_PAGE) {
ut_a(buf_page_hash_get(block->space,
@@ -2139,6 +2194,8 @@ buf_validate(void)
} else if (block->state == BUF_BLOCK_NOT_USED) {
n_free++;
}
mutex_exit(&block->mutex);
}
if (n_lru + n_free > buf_pool->curr_size) {
@@ -2286,9 +2343,14 @@ buf_get_latched_pages_number(void)
block = buf_pool_get_nth_block(buf_pool, i);
if (((block->buf_fix_count != 0) || (block->io_fix != 0))
&& block->magic_n == BUF_BLOCK_MAGIC_N) {
fixed_pages_number++;
if (block->magic_n == BUF_BLOCK_MAGIC_N) {
mutex_enter(&block->mutex);
if (block->buf_fix_count != 0 || block->io_fix != 0) {
fixed_pages_number++;
}
mutex_exit(&block->mutex);
}
}
@@ -2458,6 +2520,8 @@ buf_all_freed(void)
block = buf_pool_get_nth_block(buf_pool, i);
mutex_enter(&block->mutex);
if (block->state == BUF_BLOCK_FILE_PAGE) {
if (!buf_flush_ready_for_replace(block)) {
@@ -2469,6 +2533,8 @@ buf_all_freed(void)
ut_error;
}
}
mutex_exit(&block->mutex);
}
mutex_exit(&(buf_pool->mutex));