diff --git a/storage/innobase/btr/btr0cur.c b/storage/innobase/btr/btr0cur.c
index 4a5a247b022..3021bb71929 100644
--- a/storage/innobase/btr/btr0cur.c
+++ b/storage/innobase/btr/btr0cur.c
@@ -1845,6 +1845,7 @@ btr_cur_update_in_place(
 	roll_ptr_t	roll_ptr	= 0;
 	trx_t*		trx;
 	ulint		was_delete_marked;
+	ibool		is_hashed;
 	mem_heap_t*	heap		= NULL;
 	ulint		offsets_[REC_OFFS_NORMAL_SIZE];
 	ulint*		offsets		= offsets_;
@@ -1886,7 +1887,21 @@ btr_cur_update_in_place(
 		return(err);
 	}
 
-	if (block->is_hashed) {
+	if (!(flags & BTR_KEEP_SYS_FLAG)) {
+		row_upd_rec_sys_fields(rec, NULL,
+				       index, offsets, trx, roll_ptr);
+	}
+
+	was_delete_marked = rec_get_deleted_flag(
+		rec, page_is_comp(buf_block_get_frame(block)));
+
+	is_hashed = block->is_hashed;
+
+	if (is_hashed) {
+		/* TO DO: Can we skip this if none of the fields
+		index->search_info->curr_n_fields
+		are being updated? */
+
 		/* The function row_upd_changes_ord_field_binary works only
 		if the update vector was built for a clustered index, we must
 		NOT call it if index is secondary */
@@ -1902,17 +1917,9 @@ btr_cur_update_in_place(
 		rw_lock_x_lock(&btr_search_latch);
 	}
 
-	if (!(flags & BTR_KEEP_SYS_FLAG)) {
-		row_upd_rec_sys_fields(rec, NULL,
-				       index, offsets, trx, roll_ptr);
-	}
-
-	was_delete_marked = rec_get_deleted_flag(
-		rec, page_is_comp(buf_block_get_frame(block)));
-
 	row_upd_rec_in_place(rec, index, offsets, update, page_zip);
 
-	if (block->is_hashed) {
+	if (is_hashed) {
 		rw_lock_x_unlock(&btr_search_latch);
 	}
 
@@ -2638,7 +2645,8 @@ btr_cur_parse_del_mark_set_clust_rec(
 
 		/* We do not need to reserve btr_search_latch, as the page
 		is only being recovered, and there cannot be a hash index to
-		it. */
+		it. Besides, these fields are being updated in place
+		and the adaptive hash index does not depend on them. */
 
 		btr_rec_set_deleted_flag(rec, page_zip, val);
 
@@ -2718,9 +2726,9 @@ btr_cur_del_mark_set_clust_rec(
 		return(err);
 	}
 
-	if (block->is_hashed) {
-		rw_lock_x_lock(&btr_search_latch);
-	}
+	/* The btr_search_latch is not needed here, because
+	the adaptive hash index does not depend on the delete-mark
+	and the delete-mark is being updated in place. */
 
 	page_zip = buf_block_get_page_zip(block);
 
@@ -2734,10 +2742,6 @@ btr_cur_del_mark_set_clust_rec(
 					   index, offsets, trx, roll_ptr);
 	}
 
-	if (block->is_hashed) {
-		rw_lock_x_unlock(&btr_search_latch);
-	}
-
 	btr_cur_del_mark_set_clust_rec_log(flags, rec, index, val, trx,
 					   roll_ptr, mtr);
 
@@ -2813,7 +2817,8 @@ btr_cur_parse_del_mark_set_sec_rec(
 
 		/* We do not need to reserve btr_search_latch, as the page
 		is only being recovered, and there cannot be a hash index to
-		it. */
+		it. Besides, the delete-mark flag is being updated in place
+		and the adaptive hash index does not depend on it. */
 
 		btr_rec_set_deleted_flag(rec, page_zip, val);
 	}
@@ -2861,16 +2866,11 @@ btr_cur_del_mark_set_sec_rec(
 	ut_ad(!!page_rec_is_comp(rec)
 	      == dict_table_is_comp(cursor->index->table));
 
-	if (block->is_hashed) {
-		rw_lock_x_lock(&btr_search_latch);
-	}
-
+	/* We do not need to reserve btr_search_latch, as the
+	delete-mark flag is being updated in place and the adaptive
+	hash index does not depend on it. */
 	btr_rec_set_deleted_flag(rec, buf_block_get_page_zip(block), val);
 
-	if (block->is_hashed) {
-		rw_lock_x_unlock(&btr_search_latch);
-	}
-
 	btr_cur_del_mark_set_sec_rec_log(rec, val, mtr);
 
 	return(DB_SUCCESS);
@@ -2891,8 +2891,11 @@ btr_cur_set_deleted_flag_for_ibuf(
 	ibool		val,	/*!< in: value to set */
 	mtr_t*		mtr)	/*!< in: mtr */
 {
-	/* We do not need to reserve btr_search_latch, as the page has just
-	been read to the buffer pool and there cannot be a hash index to it. */
+	/* We do not need to reserve btr_search_latch, as the page
+	has just been read to the buffer pool and there cannot be
+	a hash index to it. Besides, the delete-mark flag is being
+	updated in place and the adaptive hash index does not depend
+	on it. */
 
 	btr_rec_set_deleted_flag(rec, page_zip, val);
 
diff --git a/storage/innobase/buf/buf0buf.c b/storage/innobase/buf/buf0buf.c
index 09b7820ae41..e0b9e979970 100644
--- a/storage/innobase/buf/buf0buf.c
+++ b/storage/innobase/buf/buf0buf.c
@@ -2990,19 +2990,20 @@ buf_page_init_low(
 
 /********************************************************************//**
 Inits a page to the buffer buf_pool. */
-static
+static __attribute__((nonnull))
 void
 buf_page_init(
 /*==========*/
+	buf_pool_t*	buf_pool,/*!< in/out: buffer pool */
 	ulint		space,	/*!< in: space id */
 	ulint		offset,	/*!< in: offset of the page within space
 				in units of a page */
 	ulint		fold,	/*!< in: buf_page_address_fold(space,offset) */
-	buf_block_t*	block)	/*!< in: block to init */
+	buf_block_t*	block)	/*!< in/out: block to init */
 {
 	buf_page_t*	hash_page;
-	buf_pool_t*	buf_pool	= buf_pool_get(space, offset);
 
+	ut_ad(buf_pool == buf_pool_get(space, offset));
 	ut_ad(buf_pool_mutex_own(buf_pool));
 	ut_ad(mutex_own(&(block->mutex)));
 	ut_a(buf_block_get_state(block) != BUF_BLOCK_FILE_PAGE);
@@ -3161,7 +3162,7 @@ err_exit:
 
 		ut_ad(buf_pool_from_bpage(bpage) == buf_pool);
 
-		buf_page_init(space, offset, fold, block);
+		buf_page_init(buf_pool, space, offset, fold, block);
 
 		/* The block must be put to the LRU list, to the old blocks */
 		buf_LRU_add_block(bpage, TRUE/* to old blocks */);
@@ -3365,7 +3366,7 @@ buf_page_create(
 
 	mutex_enter(&block->mutex);
 
-	buf_page_init(space, offset, fold, block);
+	buf_page_init(buf_pool, space, offset, fold, block);
 
 	/* The block must be put to the LRU list */
 	buf_LRU_add_block(&block->page, FALSE);
diff --git a/storage/innobase/ibuf/ibuf0ibuf.c b/storage/innobase/ibuf/ibuf0ibuf.c
index 718f2082ce9..054a2d364b6 100644
--- a/storage/innobase/ibuf/ibuf0ibuf.c
+++ b/storage/innobase/ibuf/ibuf0ibuf.c
@@ -2210,15 +2210,15 @@ ibuf_add_free_page(void)
 		buf_block_t*	block = buf_page_get(
 			IBUF_SPACE_ID, 0, page_no, RW_X_LATCH, &mtr);
 
+		ibuf_enter(&mtr);
+
+		mutex_enter(&ibuf_mutex);
+
 		buf_block_dbg_add_level(block, SYNC_IBUF_TREE_NODE_NEW);
 
 		page = buf_block_get_frame(block);
 	}
 
-	ibuf_enter(&mtr);
-
-	mutex_enter(&ibuf_mutex);
-
 	root = ibuf_tree_root_get(&mtr);
 
 	/* Add the page to the free list and update the ibuf size data */
diff --git a/storage/innobase/include/page0page.h b/storage/innobase/include/page0page.h
index 346f65302f7..ad1445b3935 100644
--- a/storage/innobase/include/page0page.h
+++ b/storage/innobase/include/page0page.h
@@ -68,10 +68,7 @@ typedef byte		page_header_t;
 #define PAGE_MAX_TRX_ID	 18	/* highest id of a trx which may have modified
 				a record on the page; trx_id_t; defined only
 				in secondary indexes and in the insert buffer
-				tree; NOTE: this may be modified only
-				when the thread has an x-latch to the page,
-				and ALSO an x-latch to btr_search_latch
-				if there is a hash index to the page! */
+				tree */
 #define PAGE_HEADER_PRIV_END 26	/* end of private data structure of the
 				page header which are set in a page create */
 /*----*/
diff --git a/storage/innobase/include/row0upd.ic b/storage/innobase/include/row0upd.ic
index 0894ed373b0..11db82f64da 100644
--- a/storage/innobase/include/row0upd.ic
+++ b/storage/innobase/include/row0upd.ic
@@ -157,11 +157,6 @@ row_upd_rec_sys_fields(
 {
 	ut_ad(dict_index_is_clust(index));
 	ut_ad(rec_offs_validate(rec, index, offsets));
-#ifdef UNIV_SYNC_DEBUG
-	if (!rw_lock_own(&btr_search_latch, RW_LOCK_EX)) {
-		ut_ad(!buf_block_align(rec)->is_hashed);
-	}
-#endif /* UNIV_SYNC_DEBUG */
 
 	if (UNIV_LIKELY_NULL(page_zip)) {
 		ulint	pos = dict_index_get_sys_col_pos(index, DATA_TRX_ID);