From deba087af08a53dad962c551fcbd6cf0c0cc5180 Mon Sep 17 00:00:00 2001 From: Joerg Bruehe Date: Wed, 28 Sep 2011 11:43:16 +0200 Subject: [PATCH 1/4] Fix the spec file: Files must not be mentioned twice in a "%files" list. --- support-files/mysql.spec.sh | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/support-files/mysql.spec.sh b/support-files/mysql.spec.sh index 1442e1b96bd..009d38fde65 100644 --- a/support-files/mysql.spec.sh +++ b/support-files/mysql.spec.sh @@ -1002,7 +1002,6 @@ echo "=====" >> $STATUS_HISTORY %doc %attr(644, root, man) %{_mandir}/man1/replace.1* %doc %attr(644, root, man) %{_mandir}/man1/resolve_stack_dump.1* %doc %attr(644, root, man) %{_mandir}/man1/resolveip.1* -%doc %attr(644, root, man) %{_mandir}/man1/mysql_plugin.1* %ghost %config(noreplace,missingok) %{_sysconfdir}/my.cnf @@ -1020,7 +1019,6 @@ echo "=====" >> $STATUS_HISTORY %attr(755, root, root) %{_bindir}/mysql_setpermission %attr(755, root, root) %{_bindir}/mysql_tzinfo_to_sql %attr(755, root, root) %{_bindir}/mysql_upgrade -%attr(755, root, root) %{_bindir}/mysql_plugin %attr(755, root, root) %{_bindir}/mysql_zap %attr(755, root, root) %{_bindir}/mysqlbug %attr(755, root, root) %{_bindir}/mysqld_multi @@ -1131,6 +1129,11 @@ echo "=====" >> $STATUS_HISTORY # merging BK trees) ############################################################################## %changelog +* Wed Sep 28 2011 Joerg Bruehe + +- Fix duplicate mentioning of "mysql_plugin" and its manual page, + it is better to keep alphabetic order in the files list (merging!). + * Tue Sep 13 2011 Jonathan Perkin - Add support for Oracle Linux 6 and Red Hat Enterprise Linux 6. Due to From fdfea7a8180ef5d97d61038bcec75fed6f892fbc Mon Sep 17 00:00:00 2001 From: Joerg Bruehe Date: Fri, 30 Sep 2011 13:27:29 +0200 Subject: [PATCH 2/4] Transferring a change from main 5.5 into the 5.5.17 build: | revision-id: rafal.somla@oracle.com-20110929150216-jl3y54s54w04y2vu | parent: marko.makela@oracle.com-20110929123146-03lcg1vixncyviyn | committer: Rafal Somla | branch nick: bug12982926 | timestamp: Thu 2011-09-29 17:02:16 +0200 | message: | Bug#12982926 CLIENT CAN OVERRIDE ZERO-LENGTH-ALLOCATE BUFFER | | Changes in client plugin needed for testing the issue (test instrumentation). --- libmysql/authentication_win/handshake_client.cc | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/libmysql/authentication_win/handshake_client.cc b/libmysql/authentication_win/handshake_client.cc index 565726651cb..02e5483da29 100644 --- a/libmysql/authentication_win/handshake_client.cc +++ b/libmysql/authentication_win/handshake_client.cc @@ -161,6 +161,21 @@ int Handshake_client::write_packet(Blob &data) keep all the data. */ unsigned block_count= data.len()/512 + ((data.len() % 512) ? 1 : 0); + +#if !defined(DBUG_OFF) && defined(WINAUTH_USE_DBUG_LIB) + + /* + For testing purposes, use wrong block count to see how server + handles this. + */ + DBUG_EXECUTE_IF("winauth_first_packet_test",{ + block_count= data.len() == 601 ? 0 : + data.len() == 602 ? 
1 : + block_count; + }); + +#endif + DBUG_ASSERT(block_count < (unsigned)0x100); saved_byte= data[254]; data[254] = block_count; From 3fb8fe5eb27356c147ac39ae18de3d3e682918b8 Mon Sep 17 00:00:00 2001 From: Joerg Bruehe Date: Fri, 30 Sep 2011 13:37:22 +0200 Subject: [PATCH 3/4] Transferring a change from main 5.5 into the 5.5.17 build: | revision-id: inaam.rana@oracle.com-20110930110219-vnpaqghj9hm0grds | parent: rohit.kalhans@oracle.com-20110930094635-hjhrv55tg6z6pz7y | committer: Inaam Rana | branch nick: mysql-5.5 | timestamp: Fri 2011-09-30 07:02:19 -0400 | message: | Revert original fix for Bug 12612184 and the follow up fix for | Bug 12704861. | | Bug 12704861 fix was revno: 3504.1.1 (rb://693) | Bug 12612184 fix was revno: 3445.1.10 (rb://678) --- storage/innobase/btr/btr0btr.c | 272 ++++---------------------- storage/innobase/btr/btr0cur.c | 140 ++++--------- storage/innobase/buf/buf0buf.c | 25 +++ storage/innobase/fsp/fsp0fsp.c | 234 +++++++++------------- storage/innobase/include/btr0btr.h | 49 +---- storage/innobase/include/btr0cur.h | 44 ++--- storage/innobase/include/btr0cur.ic | 4 +- storage/innobase/include/buf0buf.h | 36 +--- storage/innobase/include/buf0buf.ic | 17 +- storage/innobase/include/fsp0fsp.h | 30 ++- storage/innobase/include/mtr0mtr.h | 15 +- storage/innobase/include/mtr0mtr.ic | 8 +- storage/innobase/include/page0cur.ic | 5 +- storage/innobase/include/page0page.h | 39 +--- storage/innobase/include/page0page.ic | 32 +-- storage/innobase/include/rem0rec.h | 4 +- storage/innobase/include/rem0rec.ic | 4 +- storage/innobase/include/sync0rw.ic | 10 +- storage/innobase/include/sync0sync.h | 6 +- storage/innobase/include/univ.i | 5 +- storage/innobase/mtr/mtr0mtr.c | 7 +- storage/innobase/page/page0cur.c | 17 +- storage/innobase/page/page0page.c | 52 ++--- storage/innobase/row/row0ins.c | 61 +----- storage/innobase/row/row0row.c | 28 +-- storage/innobase/row/row0upd.c | 40 ++-- storage/innobase/row/row0vers.c | 16 +- storage/innobase/sync/sync0rw.c | 4 +- storage/innobase/sync/sync0sync.c | 12 +- storage/innobase/trx/trx0rec.c | 4 +- storage/innobase/trx/trx0undo.c | 4 +- 31 files changed, 366 insertions(+), 858 deletions(-) diff --git a/storage/innobase/btr/btr0btr.c b/storage/innobase/btr/btr0btr.c index 6a6f0025fa5..b476167af29 100644 --- a/storage/innobase/btr/btr0btr.c +++ b/storage/innobase/btr/btr0btr.c @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1994, 2011, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1994, 2010, Innobase Oy. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -906,29 +906,28 @@ btr_page_alloc_for_ibuf( /**************************************************************//** Allocates a new file page to be used in an index tree. NOTE: we assume that the caller has made the reservation for free extents! 
-@return allocated page number, FIL_NULL if out of space */ -static __attribute__((nonnull(1,5), warn_unused_result)) -ulint -btr_page_alloc_low( -/*===============*/ +@return new allocated block, x-latched; NULL if out of space */ +UNIV_INTERN +buf_block_t* +btr_page_alloc( +/*===========*/ dict_index_t* index, /*!< in: index */ ulint hint_page_no, /*!< in: hint of a good page */ byte file_direction, /*!< in: direction where a possible page split is made */ ulint level, /*!< in: level where the page is placed in the tree */ - mtr_t* mtr, /*!< in/out: mini-transaction - for the allocation */ - mtr_t* init_mtr) /*!< in/out: mini-transaction - in which the page should be - initialized (may be the same - as mtr), or NULL if it should - not be initialized (the page - at hint was previously freed - in mtr) */ + mtr_t* mtr) /*!< in: mtr */ { fseg_header_t* seg_header; page_t* root; + buf_block_t* new_block; + ulint new_page_no; + + if (dict_index_is_ibuf(index)) { + + return(btr_page_alloc_for_ibuf(index, mtr)); + } root = btr_root_get(index, mtr); @@ -942,42 +941,8 @@ btr_page_alloc_low( reservation for free extents, and thus we know that a page can be allocated: */ - return(fseg_alloc_free_page_general( - seg_header, hint_page_no, file_direction, - TRUE, mtr, init_mtr)); -} - -/**************************************************************//** -Allocates a new file page to be used in an index tree. NOTE: we assume -that the caller has made the reservation for free extents! -@return new allocated block, x-latched; NULL if out of space */ -UNIV_INTERN -buf_block_t* -btr_page_alloc( -/*===========*/ - dict_index_t* index, /*!< in: index */ - ulint hint_page_no, /*!< in: hint of a good page */ - byte file_direction, /*!< in: direction where a possible - page split is made */ - ulint level, /*!< in: level where the page is placed - in the tree */ - mtr_t* mtr, /*!< in/out: mini-transaction - for the allocation */ - mtr_t* init_mtr) /*!< in/out: mini-transaction - for x-latching and initializing - the page */ -{ - buf_block_t* new_block; - ulint new_page_no; - - if (dict_index_is_ibuf(index)) { - - return(btr_page_alloc_for_ibuf(index, mtr)); - } - - new_page_no = btr_page_alloc_low( - index, hint_page_no, file_direction, level, mtr, init_mtr); - + new_page_no = fseg_alloc_free_page_general(seg_header, hint_page_no, + file_direction, TRUE, mtr); if (new_page_no == FIL_NULL) { return(NULL); @@ -985,16 +950,9 @@ btr_page_alloc( new_block = buf_page_get(dict_index_get_space(index), dict_table_zip_size(index->table), - new_page_no, RW_X_LATCH, init_mtr); + new_page_no, RW_X_LATCH, mtr); buf_block_dbg_add_level(new_block, SYNC_TREE_NODE_NEW); - if (mtr->freed_clust_leaf) { - mtr_memo_release(mtr, new_block, MTR_MEMO_FREE_CLUST_LEAF); - ut_ad(!mtr_memo_contains(mtr, new_block, - MTR_MEMO_FREE_CLUST_LEAF)); - } - - ut_ad(btr_freed_leaves_validate(mtr)); return(new_block); } @@ -1107,15 +1065,6 @@ btr_page_free_low( fseg_free_page(seg_header, buf_block_get_space(block), buf_block_get_page_no(block), mtr); - - /* The page was marked free in the allocation bitmap, but it - should remain buffer-fixed until mtr_commit(mtr) or until it - is explicitly freed from the mini-transaction. */ - ut_ad(mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX)); - /* TODO: Discard any operations on the page from the redo log - and remove the block from the flush list and the buffer pool. - This would free up buffer pool earlier and reduce writes to - both the tablespace and the redo log. 
*/ } /**************************************************************//** @@ -1129,140 +1078,13 @@ btr_page_free( buf_block_t* block, /*!< in: block to be freed, x-latched */ mtr_t* mtr) /*!< in: mtr */ { - const page_t* page = buf_block_get_frame(block); - ulint level = btr_page_get_level(page, mtr); + ulint level; + + level = btr_page_get_level(buf_block_get_frame(block), mtr); - ut_ad(fil_page_get_type(block->frame) == FIL_PAGE_INDEX); btr_page_free_low(index, block, level, mtr); - - /* The handling of MTR_MEMO_FREE_CLUST_LEAF assumes this. */ - ut_ad(mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX)); - - if (level == 0 && dict_index_is_clust(index)) { - /* We may have to call btr_mark_freed_leaves() to - temporarily mark the block nonfree for invoking - btr_store_big_rec_extern_fields_func() after an - update. Remember that the block was freed. */ - mtr->freed_clust_leaf = TRUE; - mtr_memo_push(mtr, block, MTR_MEMO_FREE_CLUST_LEAF); - } - - ut_ad(btr_freed_leaves_validate(mtr)); } -/**************************************************************//** -Marks all MTR_MEMO_FREE_CLUST_LEAF pages nonfree or free. -For invoking btr_store_big_rec_extern_fields() after an update, -we must temporarily mark freed clustered index pages allocated, so -that off-page columns will not be allocated from them. Between the -btr_store_big_rec_extern_fields() and mtr_commit() we have to -mark the pages free again, so that no pages will be leaked. */ -UNIV_INTERN -void -btr_mark_freed_leaves( -/*==================*/ - dict_index_t* index, /*!< in/out: clustered index */ - mtr_t* mtr, /*!< in/out: mini-transaction */ - ibool nonfree)/*!< in: TRUE=mark nonfree, FALSE=mark freed */ -{ - /* This is loosely based on mtr_memo_release(). */ - - ulint offset; - - ut_ad(dict_index_is_clust(index)); - ut_ad(mtr->magic_n == MTR_MAGIC_N); - ut_ad(mtr->state == MTR_ACTIVE); - - if (!mtr->freed_clust_leaf) { - return; - } - - offset = dyn_array_get_data_size(&mtr->memo); - - while (offset > 0) { - mtr_memo_slot_t* slot; - buf_block_t* block; - - offset -= sizeof *slot; - - slot = dyn_array_get_element(&mtr->memo, offset); - - if (slot->type != MTR_MEMO_FREE_CLUST_LEAF) { - continue; - } - - /* Because btr_page_alloc() does invoke - mtr_memo_release on MTR_MEMO_FREE_CLUST_LEAF, all - blocks tagged with MTR_MEMO_FREE_CLUST_LEAF in the - memo must still be clustered index leaf tree pages. */ - block = slot->object; - ut_a(buf_block_get_space(block) - == dict_index_get_space(index)); - ut_a(fil_page_get_type(buf_block_get_frame(block)) - == FIL_PAGE_INDEX); - ut_a(page_is_leaf(buf_block_get_frame(block))); - - if (nonfree) { - /* Allocate the same page again. */ - ulint page_no; - page_no = btr_page_alloc_low( - index, buf_block_get_page_no(block), - FSP_NO_DIR, 0, mtr, NULL); - ut_a(page_no == buf_block_get_page_no(block)); - } else { - /* Assert that the page is allocated and free it. */ - btr_page_free_low(index, block, 0, mtr); - } - } - - ut_ad(btr_freed_leaves_validate(mtr)); -} - -#ifdef UNIV_DEBUG -/**************************************************************//** -Validates all pages marked MTR_MEMO_FREE_CLUST_LEAF. 
-@see btr_mark_freed_leaves() -@return TRUE */ -UNIV_INTERN -ibool -btr_freed_leaves_validate( -/*======================*/ - mtr_t* mtr) /*!< in: mini-transaction */ -{ - ulint offset; - - ut_ad(mtr->magic_n == MTR_MAGIC_N); - ut_ad(mtr->state == MTR_ACTIVE); - - offset = dyn_array_get_data_size(&mtr->memo); - - while (offset > 0) { - const mtr_memo_slot_t* slot; - const buf_block_t* block; - - offset -= sizeof *slot; - - slot = dyn_array_get_element(&mtr->memo, offset); - - if (slot->type != MTR_MEMO_FREE_CLUST_LEAF) { - continue; - } - - ut_a(mtr->freed_clust_leaf); - /* Because btr_page_alloc() does invoke - mtr_memo_release on MTR_MEMO_FREE_CLUST_LEAF, all - blocks tagged with MTR_MEMO_FREE_CLUST_LEAF in the - memo must still be clustered index leaf tree pages. */ - block = slot->object; - ut_a(fil_page_get_type(buf_block_get_frame(block)) - == FIL_PAGE_INDEX); - ut_a(page_is_leaf(buf_block_get_frame(block))); - } - - return(TRUE); -} -#endif /* UNIV_DEBUG */ - /**************************************************************//** Sets the child node file address in a node pointer. */ UNIV_INLINE @@ -1987,7 +1809,7 @@ btr_root_raise_and_insert( level = btr_page_get_level(root, mtr); - new_block = btr_page_alloc(index, 0, FSP_NO_DIR, level, mtr, mtr); + new_block = btr_page_alloc(index, 0, FSP_NO_DIR, level, mtr); new_page = buf_block_get_frame(new_block); new_page_zip = buf_block_get_page_zip(new_block); ut_a(!new_page_zip == !root_page_zip); @@ -2458,7 +2280,7 @@ btr_attach_half_pages( /*==================*/ dict_index_t* index, /*!< in: the index tree */ buf_block_t* block, /*!< in/out: page to be split */ - const rec_t* split_rec, /*!< in: first record on upper + rec_t* split_rec, /*!< in: first record on upper half page */ buf_block_t* new_block, /*!< in/out: the new half page */ ulint direction, /*!< in: FSP_UP or FSP_DOWN */ @@ -2723,7 +2545,7 @@ func_start: /* 2. Allocate a new page to the index */ new_block = btr_page_alloc(cursor->index, hint_page_no, direction, - btr_page_get_level(page, mtr), mtr, mtr); + btr_page_get_level(page, mtr), mtr); new_page = buf_block_get_frame(new_block); new_page_zip = buf_block_get_page_zip(new_block); btr_page_create(new_block, new_page_zip, cursor->index, @@ -3173,16 +2995,15 @@ btr_node_ptr_delete( ut_a(err == DB_SUCCESS); if (!compressed) { - btr_cur_compress_if_useful(&cursor, FALSE, mtr); + btr_cur_compress_if_useful(&cursor, mtr); } } /*************************************************************//** If page is the only on its level, this function moves its records to the -father page, thus reducing the tree height. -@return father block */ +father page, thus reducing the tree height. 
*/ static -buf_block_t* +void btr_lift_page_up( /*=============*/ dict_index_t* index, /*!< in: index tree */ @@ -3299,8 +3120,6 @@ btr_lift_page_up( } ut_ad(page_validate(father_page, index)); ut_ad(btr_check_node_ptr(index, father_block, mtr)); - - return(father_block); } /*************************************************************//** @@ -3317,13 +3136,11 @@ UNIV_INTERN ibool btr_compress( /*=========*/ - btr_cur_t* cursor, /*!< in/out: cursor on the page to merge - or lift; the page must not be empty: - when deleting records, use btr_discard_page() - if the page would become empty */ - ibool adjust, /*!< in: TRUE if should adjust the - cursor position even if compression occurs */ - mtr_t* mtr) /*!< in/out: mini-transaction */ + btr_cur_t* cursor, /*!< in: cursor on the page to merge or lift; + the page must not be empty: in record delete + use btr_discard_page if the page would become + empty */ + mtr_t* mtr) /*!< in: mtr */ { dict_index_t* index; ulint space; @@ -3341,14 +3158,12 @@ btr_compress( ulint* offsets; ulint data_size; ulint n_recs; - ulint nth_rec = 0; /* remove bogus warning */ ulint max_ins_size; ulint max_ins_size_reorg; block = btr_cur_get_block(cursor); page = btr_cur_get_page(cursor); index = btr_cur_get_index(cursor); - ut_a((ibool) !!page_is_comp(page) == dict_table_is_comp(index->table)); ut_ad(mtr_memo_contains(mtr, dict_index_get_lock(index), @@ -3369,10 +3184,6 @@ btr_compress( offsets = btr_page_get_father_block(NULL, heap, index, block, mtr, &father_cursor); - if (adjust) { - nth_rec = page_rec_get_n_recs_before(btr_cur_get_rec(cursor)); - } - /* Decide the page to which we try to merge and which will inherit the locks */ @@ -3399,9 +3210,9 @@ btr_compress( } else { /* The page is the only one on the level, lift the records to the father */ - - merge_block = btr_lift_page_up(index, block, mtr); - goto func_exit; + btr_lift_page_up(index, block, mtr); + mem_heap_free(heap); + return(TRUE); } n_recs = page_get_n_recs(page); @@ -3483,10 +3294,6 @@ err_exit: btr_node_ptr_delete(index, block, mtr); lock_update_merge_left(merge_block, orig_pred, block); - - if (adjust) { - nth_rec += page_rec_get_n_recs_before(orig_pred); - } } else { rec_t* orig_succ; #ifdef UNIV_BTR_DEBUG @@ -3551,6 +3358,7 @@ err_exit: } btr_blob_dbg_remove(page, index, "btr_compress"); + mem_heap_free(heap); if (!dict_index_is_clust(index) && page_is_leaf(merge_page)) { /* Update the free bits of the B-tree page in the @@ -3602,16 +3410,6 @@ err_exit: btr_page_free(index, block, mtr); ut_ad(btr_check_node_ptr(index, merge_block, mtr)); -func_exit: - mem_heap_free(heap); - - if (adjust) { - btr_cur_position( - index, - page_rec_get_nth(merge_block->frame, nth_rec), - merge_block, cursor); - } - return(TRUE); } diff --git a/storage/innobase/btr/btr0cur.c b/storage/innobase/btr/btr0cur.c index 3021bb71929..f63ca20ef24 100644 --- a/storage/innobase/btr/btr0cur.c +++ b/storage/innobase/btr/btr0cur.c @@ -1985,6 +1985,7 @@ btr_cur_optimistic_update( ulint old_rec_size; dtuple_t* new_entry; roll_ptr_t roll_ptr; + trx_t* trx; mem_heap_t* heap; ulint i; ulint n_ext; @@ -2001,10 +2002,9 @@ btr_cur_optimistic_update( heap = mem_heap_create(1024); offsets = rec_get_offsets(rec, index, NULL, ULINT_UNDEFINED, &heap); -#if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG - ut_a(!rec_offs_any_null_extern(rec, offsets) - || trx_is_recv(thr_get_trx(thr))); -#endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */ +#ifdef UNIV_BLOB_NULL_DEBUG + ut_a(!rec_offs_any_null_extern(rec, offsets)); +#endif /* 
UNIV_BLOB_NULL_DEBUG */ #ifdef UNIV_DEBUG if (btr_cur_print_record_ops && thr) { @@ -2127,11 +2127,13 @@ any_extern: page_cur_move_to_prev(page_cursor); + trx = thr_get_trx(thr); + if (!(flags & BTR_KEEP_SYS_FLAG)) { row_upd_index_entry_sys_field(new_entry, index, DATA_ROLL_PTR, roll_ptr); row_upd_index_entry_sys_field(new_entry, index, DATA_TRX_ID, - thr_get_trx(thr)->id); + trx->id); } /* There are no externally stored columns in new_entry */ @@ -2217,9 +2219,7 @@ btr_cur_pessimistic_update( /*=======================*/ ulint flags, /*!< in: undo logging, locking, and rollback flags */ - btr_cur_t* cursor, /*!< in/out: cursor on the record to update; - cursor may become invalid if *big_rec == NULL - || !(flags & BTR_KEEP_POS_FLAG) */ + btr_cur_t* cursor, /*!< in: cursor on the record to update */ mem_heap_t** heap, /*!< in/out: pointer to memory heap, or NULL */ big_rec_t** big_rec,/*!< out: big rec vector whose fields have to be stored externally by the caller, or NULL */ @@ -2358,7 +2358,7 @@ btr_cur_pessimistic_update( record to be inserted: we have to remember which fields were such */ ut_ad(!page_is_comp(page) || !rec_get_node_ptr_flag(rec)); - ut_ad(rec_offs_validate(rec, index, offsets)); + offsets = rec_get_offsets(rec, index, offsets, ULINT_UNDEFINED, heap); n_ext += btr_push_update_extern_fields(new_entry, update, *heap); if (UNIV_LIKELY_NULL(page_zip)) { @@ -2381,10 +2381,6 @@ make_external: err = DB_TOO_BIG_RECORD; goto return_after_reservations; } - - ut_ad(page_is_leaf(page)); - ut_ad(dict_index_is_clust(index)); - ut_ad(flags & BTR_KEEP_POS_FLAG); } /* Store state of explicit locks on rec on the page infimum record, @@ -2412,8 +2408,6 @@ make_external: rec = btr_cur_insert_if_possible(cursor, new_entry, n_ext, mtr); if (rec) { - page_cursor->rec = rec; - lock_rec_restore_from_page_infimum(btr_cur_get_block(cursor), rec, block); @@ -2427,10 +2421,7 @@ make_external: rec, index, offsets, mtr); } - btr_cur_compress_if_useful( - cursor, - big_rec_vec != NULL && (flags & BTR_KEEP_POS_FLAG), - mtr); + btr_cur_compress_if_useful(cursor, mtr); if (page_zip && !dict_index_is_clust(index) && page_is_leaf(page)) { @@ -2450,21 +2441,6 @@ make_external: } } - if (big_rec_vec) { - ut_ad(page_is_leaf(page)); - ut_ad(dict_index_is_clust(index)); - ut_ad(flags & BTR_KEEP_POS_FLAG); - - /* btr_page_split_and_insert() in - btr_cur_pessimistic_insert() invokes - mtr_memo_release(mtr, index->lock, MTR_MEMO_X_LOCK). - We must keep the index->lock when we created a - big_rec, so that row_upd_clust_rec() can store the - big_rec in the same mini-transaction. */ - - mtr_x_lock(dict_index_get_lock(index), mtr); - } - /* Was the record to be updated positioned as the first user record on its page? */ was_first = page_cur_is_before_first(page_cursor); @@ -2480,7 +2456,6 @@ make_external: ut_a(rec); ut_a(err == DB_SUCCESS); ut_a(dummy_big_rec == NULL); - page_cursor->rec = rec; if (dict_index_is_sec_or_ibuf(index)) { /* Update PAGE_MAX_TRX_ID in the index page header. 
@@ -2915,12 +2890,10 @@ UNIV_INTERN ibool btr_cur_compress_if_useful( /*=======================*/ - btr_cur_t* cursor, /*!< in/out: cursor on the page to compress; - cursor does not stay valid if !adjust and - compression occurs */ - ibool adjust, /*!< in: TRUE if should adjust the - cursor position even if compression occurs */ - mtr_t* mtr) /*!< in/out: mini-transaction */ + btr_cur_t* cursor, /*!< in: cursor on the page to compress; + cursor does not stay valid if compression + occurs */ + mtr_t* mtr) /*!< in: mtr */ { ut_ad(mtr_memo_contains(mtr, dict_index_get_lock(btr_cur_get_index(cursor)), @@ -2929,7 +2902,7 @@ btr_cur_compress_if_useful( MTR_MEMO_PAGE_X_FIX)); return(btr_cur_compress_recommendation(cursor, mtr) - && btr_compress(cursor, adjust, mtr)); + && btr_compress(cursor, mtr)); } /*******************************************************//** @@ -3171,7 +3144,7 @@ return_after_reservations: mem_heap_free(heap); if (ret == FALSE) { - ret = btr_cur_compress_if_useful(cursor, FALSE, mtr); + ret = btr_cur_compress_if_useful(cursor, mtr); } if (n_extents > 0) { @@ -4160,9 +4133,6 @@ btr_store_big_rec_extern_fields_func( the "external storage" flags in offsets will not correspond to rec when this function returns */ - const big_rec_t*big_rec_vec, /*!< in: vector containing fields - to be stored externally */ - #ifdef UNIV_DEBUG mtr_t* local_mtr, /*!< in: mtr containing the latch to rec and to the tree */ @@ -4171,11 +4141,9 @@ btr_store_big_rec_extern_fields_func( ibool update_in_place,/*! in: TRUE if the record is updated in place (not delete+insert) */ #endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */ - mtr_t* alloc_mtr) /*!< in/out: in an insert, NULL; - in an update, local_mtr for - allocating BLOB pages and - updating BLOB pointers; alloc_mtr - must not have freed any leaf pages */ + const big_rec_t*big_rec_vec) /*!< in: vector containing fields + to be stored externally */ + { ulint rec_page_no; byte* field_ref; @@ -4194,9 +4162,6 @@ btr_store_big_rec_extern_fields_func( ut_ad(rec_offs_validate(rec, index, offsets)); ut_ad(rec_offs_any_extern(offsets)); - ut_ad(local_mtr); - ut_ad(!alloc_mtr || alloc_mtr == local_mtr); - ut_ad(!update_in_place || alloc_mtr); ut_ad(mtr_memo_contains(local_mtr, dict_index_get_lock(index), MTR_MEMO_X_LOCK)); ut_ad(mtr_memo_contains(local_mtr, rec_block, MTR_MEMO_PAGE_X_FIX)); @@ -4212,25 +4177,6 @@ btr_store_big_rec_extern_fields_func( rec_page_no = buf_block_get_page_no(rec_block); ut_a(fil_page_get_type(page_align(rec)) == FIL_PAGE_INDEX); - if (alloc_mtr) { - /* Because alloc_mtr will be committed after - mtr, it is possible that the tablespace has been - extended when the B-tree record was updated or - inserted, or it will be extended while allocating - pages for big_rec. - - TODO: In mtr (not alloc_mtr), write a redo log record - about extending the tablespace to its current size, - and remember the current size. Whenever the tablespace - grows as pages are allocated, write further redo log - records to mtr. (Currently tablespace extension is not - covered by the redo log. If it were, the record would - only be written to alloc_mtr, which is committed after - mtr.) 
*/ - } else { - alloc_mtr = &mtr; - } - if (UNIV_LIKELY_NULL(page_zip)) { int err; @@ -4307,7 +4253,7 @@ btr_store_big_rec_extern_fields_func( } block = btr_page_alloc(index, hint_page_no, - FSP_NO_DIR, 0, alloc_mtr, &mtr); + FSP_NO_DIR, 0, &mtr); if (UNIV_UNLIKELY(block == NULL)) { mtr_commit(&mtr); @@ -4434,15 +4380,11 @@ btr_store_big_rec_extern_fields_func( goto next_zip_page; } - if (alloc_mtr == &mtr) { - rec_block = buf_page_get( - space_id, zip_size, - rec_page_no, - RW_X_LATCH, &mtr); - buf_block_dbg_add_level( - rec_block, - SYNC_NO_ORDER_CHECK); - } + rec_block = buf_page_get(space_id, zip_size, + rec_page_no, + RW_X_LATCH, &mtr); + buf_block_dbg_add_level(rec_block, + SYNC_NO_ORDER_CHECK); if (err == Z_STREAM_END) { mach_write_to_4(field_ref @@ -4476,8 +4418,7 @@ btr_store_big_rec_extern_fields_func( page_zip_write_blob_ptr( page_zip, rec, index, offsets, - big_rec_vec->fields[i].field_no, - alloc_mtr); + big_rec_vec->fields[i].field_no, &mtr); next_zip_page: prev_page_no = page_no; @@ -4522,23 +4463,19 @@ next_zip_page: extern_len -= store_len; - if (alloc_mtr == &mtr) { - rec_block = buf_page_get( - space_id, zip_size, - rec_page_no, - RW_X_LATCH, &mtr); - buf_block_dbg_add_level( - rec_block, - SYNC_NO_ORDER_CHECK); - } + rec_block = buf_page_get(space_id, zip_size, + rec_page_no, + RW_X_LATCH, &mtr); + buf_block_dbg_add_level(rec_block, + SYNC_NO_ORDER_CHECK); mlog_write_ulint(field_ref + BTR_EXTERN_LEN, 0, - MLOG_4BYTES, alloc_mtr); + MLOG_4BYTES, &mtr); mlog_write_ulint(field_ref + BTR_EXTERN_LEN + 4, big_rec_vec->fields[i].len - extern_len, - MLOG_4BYTES, alloc_mtr); + MLOG_4BYTES, &mtr); if (prev_page_no == FIL_NULL) { btr_blob_dbg_add_blob( @@ -4548,19 +4485,18 @@ next_zip_page: mlog_write_ulint(field_ref + BTR_EXTERN_SPACE_ID, - space_id, MLOG_4BYTES, - alloc_mtr); + space_id, + MLOG_4BYTES, &mtr); mlog_write_ulint(field_ref + BTR_EXTERN_PAGE_NO, - page_no, MLOG_4BYTES, - alloc_mtr); + page_no, + MLOG_4BYTES, &mtr); mlog_write_ulint(field_ref + BTR_EXTERN_OFFSET, FIL_PAGE_DATA, - MLOG_4BYTES, - alloc_mtr); + MLOG_4BYTES, &mtr); } prev_page_no = page_no; diff --git a/storage/innobase/buf/buf0buf.c b/storage/innobase/buf/buf0buf.c index e0b9e979970..33b4cd40215 100644 --- a/storage/innobase/buf/buf0buf.c +++ b/storage/innobase/buf/buf0buf.c @@ -1715,6 +1715,31 @@ buf_page_set_accessed_make_young( } } +/********************************************************************//** +Resets the check_index_page_at_flush field of a page if found in the buffer +pool. */ +UNIV_INTERN +void +buf_reset_check_index_page_at_flush( +/*================================*/ + ulint space, /*!< in: space id */ + ulint offset) /*!< in: page number */ +{ + buf_block_t* block; + buf_pool_t* buf_pool = buf_pool_get(space, offset); + + buf_pool_mutex_enter(buf_pool); + + block = (buf_block_t*) buf_page_hash_get(buf_pool, space, offset); + + if (block && buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE) { + ut_ad(!buf_pool_watch_is_sentinel(buf_pool, &block->page)); + block->check_index_page_at_flush = FALSE; + } + + buf_pool_mutex_exit(buf_pool); +} + /********************************************************************//** Returns the current state of is_hashed of a page. FALSE if the page is not in the pool. 
NOTE that this operation does not fix the page in the diff --git a/storage/innobase/fsp/fsp0fsp.c b/storage/innobase/fsp/fsp0fsp.c index 96d29e8ae32..3f09732a676 100644 --- a/storage/innobase/fsp/fsp0fsp.c +++ b/storage/innobase/fsp/fsp0fsp.c @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1995, 2011, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1995, 2010, Innobase Oy. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -311,9 +311,8 @@ fsp_fill_free_list( descriptor page and ibuf bitmap page; then we do not allocate more extents */ ulint space, /*!< in: space */ - fsp_header_t* header, /*!< in/out: space header */ - mtr_t* mtr) /*!< in/out: mini-transaction */ - UNIV_COLD __attribute__((nonnull)); + fsp_header_t* header, /*!< in: space header */ + mtr_t* mtr); /*!< in: mtr */ /**********************************************************************//** Allocates a single free page from a segment. This function implements the intelligent allocation strategy which tries to minimize file space @@ -326,20 +325,14 @@ fseg_alloc_free_page_low( ulint space, /*!< in: space */ ulint zip_size,/*!< in: compressed page size in bytes or 0 for uncompressed pages */ - fseg_inode_t* seg_inode, /*!< in/out: segment inode */ + fseg_inode_t* seg_inode, /*!< in: segment inode */ ulint hint, /*!< in: hint of which page would be desirable */ byte direction, /*!< in: if the new page is needed because of an index page split, and records are inserted there in order, into which direction they go alphabetically: FSP_DOWN, FSP_UP, FSP_NO_DIR */ - mtr_t* mtr, /*!< in/out: mini-transaction */ - mtr_t* init_mtr)/*!< in/out: mini-transaction in which the - page should be initialized - (may be the same as mtr), or NULL if it - should not be initialized (the page at hint - was previously freed in mtr) */ - __attribute__((warn_unused_result, nonnull(3,6))); + mtr_t* mtr); /*!< in: mtr handle */ #endif /* !UNIV_HOTBACKUP */ /**********************************************************************//** @@ -707,18 +700,17 @@ list, if not free limit == space size. This adding is necessary to make the descriptor defined, as they are uninitialized above the free limit. 
@return pointer to the extent descriptor, NULL if the page does not exist in the space or if the offset exceeds the free limit */ -UNIV_INLINE __attribute__((nonnull, warn_unused_result)) +UNIV_INLINE xdes_t* xdes_get_descriptor_with_space_hdr( /*===============================*/ - fsp_header_t* sp_header, /*!< in/out: space header, x-latched - in mtr */ - ulint space, /*!< in: space id */ - ulint offset, /*!< in: page offset; if equal - to the free limit, we try to - add new extents to the space - free list */ - mtr_t* mtr) /*!< in/out: mini-transaction */ + fsp_header_t* sp_header,/*!< in/out: space header, x-latched */ + ulint space, /*!< in: space id */ + ulint offset, /*!< in: page offset; + if equal to the free limit, + we try to add new extents to + the space free list */ + mtr_t* mtr) /*!< in: mtr handle */ { ulint limit; ulint size; @@ -726,9 +718,11 @@ xdes_get_descriptor_with_space_hdr( ulint descr_page_no; page_t* descr_page; + ut_ad(mtr); ut_ad(mtr_memo_contains(mtr, fil_space_get_latch(space, NULL), MTR_MEMO_X_LOCK)); - ut_ad(mtr_memo_contains_page(mtr, sp_header, MTR_MEMO_PAGE_X_FIX)); + ut_ad(mtr_memo_contains_page(mtr, sp_header, MTR_MEMO_PAGE_S_FIX) + || mtr_memo_contains_page(mtr, sp_header, MTR_MEMO_PAGE_X_FIX)); ut_ad(page_offset(sp_header) == FSP_HEADER_OFFSET); /* Read free limit and space size */ limit = mach_read_from_4(sp_header + FSP_FREE_LIMIT); @@ -778,7 +772,7 @@ is necessary to make the descriptor defined, as they are uninitialized above the free limit. @return pointer to the extent descriptor, NULL if the page does not exist in the space or if the offset exceeds the free limit */ -static __attribute__((nonnull, warn_unused_result)) +static xdes_t* xdes_get_descriptor( /*================*/ @@ -787,7 +781,7 @@ xdes_get_descriptor( or 0 for uncompressed pages */ ulint offset, /*!< in: page offset; if equal to the free limit, we try to add new extents to the space free list */ - mtr_t* mtr) /*!< in/out: mini-transaction */ + mtr_t* mtr) /*!< in: mtr handle */ { buf_block_t* block; fsp_header_t* sp_header; @@ -1165,14 +1159,14 @@ fsp_header_get_tablespace_size(void) Tries to extend a single-table tablespace so that a page would fit in the data file. @return TRUE if success */ -static UNIV_COLD __attribute__((nonnull, warn_unused_result)) +static ibool fsp_try_extend_data_file_with_pages( /*================================*/ ulint space, /*!< in: space */ ulint page_no, /*!< in: page number */ - fsp_header_t* header, /*!< in/out: space header */ - mtr_t* mtr) /*!< in/out: mini-transaction */ + fsp_header_t* header, /*!< in: space header */ + mtr_t* mtr) /*!< in: mtr */ { ibool success; ulint actual_size; @@ -1197,7 +1191,7 @@ fsp_try_extend_data_file_with_pages( /***********************************************************************//** Tries to extend the last data file of a tablespace if it is auto-extending. 
@return FALSE if not auto-extending */ -static UNIV_COLD __attribute__((nonnull)) +static ibool fsp_try_extend_data_file( /*=====================*/ @@ -1207,8 +1201,8 @@ fsp_try_extend_data_file( the actual file size rounded down to megabyte */ ulint space, /*!< in: space */ - fsp_header_t* header, /*!< in/out: space header */ - mtr_t* mtr) /*!< in/out: mini-transaction */ + fsp_header_t* header, /*!< in: space header */ + mtr_t* mtr) /*!< in: mtr */ { ulint size; ulint zip_size; @@ -1344,7 +1338,7 @@ fsp_fill_free_list( then we do not allocate more extents */ ulint space, /*!< in: space */ fsp_header_t* header, /*!< in/out: space header */ - mtr_t* mtr) /*!< in/out: mini-transaction */ + mtr_t* mtr) /*!< in: mtr */ { ulint limit; ulint size; @@ -1542,47 +1536,10 @@ fsp_alloc_free_extent( return(descr); } -/**********************************************************************//** -Allocates a single free page from a space. */ -static __attribute__((nonnull)) -void -fsp_alloc_from_free_frag( -/*=====================*/ - fsp_header_t* header, /*!< in/out: tablespace header */ - xdes_t* descr, /*!< in/out: extent descriptor */ - ulint bit, /*!< in: slot to allocate in the extent */ - mtr_t* mtr) /*!< in/out: mini-transaction */ -{ - ulint frag_n_used; - - ut_ad(xdes_get_state(descr, mtr) == XDES_FREE_FRAG); - ut_a(xdes_get_bit(descr, XDES_FREE_BIT, bit, mtr)); - xdes_set_bit(descr, XDES_FREE_BIT, bit, FALSE, mtr); - - /* Update the FRAG_N_USED field */ - frag_n_used = mtr_read_ulint(header + FSP_FRAG_N_USED, MLOG_4BYTES, - mtr); - frag_n_used++; - mlog_write_ulint(header + FSP_FRAG_N_USED, frag_n_used, MLOG_4BYTES, - mtr); - if (xdes_is_full(descr, mtr)) { - /* The fragment is full: move it to another list */ - flst_remove(header + FSP_FREE_FRAG, descr + XDES_FLST_NODE, - mtr); - xdes_set_state(descr, XDES_FULL_FRAG, mtr); - - flst_add_last(header + FSP_FULL_FRAG, descr + XDES_FLST_NODE, - mtr); - mlog_write_ulint(header + FSP_FRAG_N_USED, - frag_n_used - FSP_EXTENT_SIZE, MLOG_4BYTES, - mtr); - } -} - /**********************************************************************//** Allocates a single free page from a space. The page is marked as used. 
@return the page offset, FIL_NULL if no page could be allocated */ -static __attribute__((nonnull, warn_unused_result)) +static ulint fsp_alloc_free_page( /*================*/ @@ -1590,22 +1547,19 @@ fsp_alloc_free_page( ulint zip_size,/*!< in: compressed page size in bytes or 0 for uncompressed pages */ ulint hint, /*!< in: hint of which page would be desirable */ - mtr_t* mtr, /*!< in/out: mini-transaction */ - mtr_t* init_mtr)/*!< in/out: mini-transaction in which the - page should be initialized - (may be the same as mtr) */ + mtr_t* mtr) /*!< in: mtr handle */ { fsp_header_t* header; fil_addr_t first; xdes_t* descr; buf_block_t* block; ulint free; + ulint frag_n_used; ulint page_no; ulint space_size; ibool success; ut_ad(mtr); - ut_ad(init_mtr); header = fsp_get_space_header(space, zip_size, mtr); @@ -1687,19 +1641,38 @@ fsp_alloc_free_page( } } - fsp_alloc_from_free_frag(header, descr, free, mtr); + xdes_set_bit(descr, XDES_FREE_BIT, free, FALSE, mtr); + + /* Update the FRAG_N_USED field */ + frag_n_used = mtr_read_ulint(header + FSP_FRAG_N_USED, MLOG_4BYTES, + mtr); + frag_n_used++; + mlog_write_ulint(header + FSP_FRAG_N_USED, frag_n_used, MLOG_4BYTES, + mtr); + if (xdes_is_full(descr, mtr)) { + /* The fragment is full: move it to another list */ + flst_remove(header + FSP_FREE_FRAG, descr + XDES_FLST_NODE, + mtr); + xdes_set_state(descr, XDES_FULL_FRAG, mtr); + + flst_add_last(header + FSP_FULL_FRAG, descr + XDES_FLST_NODE, + mtr); + mlog_write_ulint(header + FSP_FRAG_N_USED, + frag_n_used - FSP_EXTENT_SIZE, MLOG_4BYTES, + mtr); + } /* Initialize the allocated page to the buffer pool, so that it can be obtained immediately with buf_page_get without need for a disk read. */ - buf_page_create(space, page_no, zip_size, init_mtr); + buf_page_create(space, page_no, zip_size, mtr); - block = buf_page_get(space, zip_size, page_no, RW_X_LATCH, init_mtr); + block = buf_page_get(space, zip_size, page_no, RW_X_LATCH, mtr); buf_block_dbg_add_level(block, SYNC_FSP_PAGE); /* Prior contents of the page should be ignored */ - fsp_init_file_page(block, init_mtr); + fsp_init_file_page(block, mtr); return(page_no); } @@ -1935,7 +1908,7 @@ fsp_alloc_seg_inode_page( zip_size = dict_table_flags_to_zip_size( mach_read_from_4(FSP_SPACE_FLAGS + space_header)); - page_no = fsp_alloc_free_page(space, zip_size, 0, mtr, mtr); + page_no = fsp_alloc_free_page(space, zip_size, 0, mtr); if (page_no == FIL_NULL) { @@ -2347,7 +2320,7 @@ fseg_create_general( if (page == 0) { page = fseg_alloc_free_page_low(space, zip_size, - inode, 0, FSP_UP, mtr, mtr); + inode, 0, FSP_UP, mtr); if (page == FIL_NULL) { @@ -2596,19 +2569,14 @@ fseg_alloc_free_page_low( ulint space, /*!< in: space */ ulint zip_size,/*!< in: compressed page size in bytes or 0 for uncompressed pages */ - fseg_inode_t* seg_inode, /*!< in/out: segment inode */ + fseg_inode_t* seg_inode, /*!< in: segment inode */ ulint hint, /*!< in: hint of which page would be desirable */ byte direction, /*!< in: if the new page is needed because of an index page split, and records are inserted there in order, into which direction they go alphabetically: FSP_DOWN, FSP_UP, FSP_NO_DIR */ - mtr_t* mtr, /*!< in/out: mini-transaction */ - mtr_t* init_mtr)/*!< in/out: mini-transaction in which the - page should be initialized - (may be the same as mtr), or NULL if it - should not be initialized (the page at hint - was previously freed in mtr) */ + mtr_t* mtr) /*!< in: mtr handle */ { fsp_header_t* space_header; ulint space_size; @@ -2619,6 +2587,7 @@ fseg_alloc_free_page_low( 
ulint ret_page; /*!< the allocated page offset, FIL_NULL if could not be allocated */ xdes_t* ret_descr; /*!< the extent of the allocated page */ + ibool frag_page_allocated = FALSE; ibool success; ulint n; @@ -2640,8 +2609,6 @@ fseg_alloc_free_page_low( if (descr == NULL) { /* Hint outside space or too high above free limit: reset hint */ - ut_a(init_mtr); - /* The file space header page is always allocated. */ hint = 0; descr = xdes_get_descriptor(space, zip_size, hint, mtr); } @@ -2652,20 +2619,15 @@ fseg_alloc_free_page_low( && mach_read_from_8(descr + XDES_ID) == seg_id && (xdes_get_bit(descr, XDES_FREE_BIT, hint % FSP_EXTENT_SIZE, mtr) == TRUE)) { -take_hinted_page: + /* 1. We can take the hinted page =================================*/ ret_descr = descr; ret_page = hint; - /* Skip the check for extending the tablespace. If the - page hint were not within the size of the tablespace, - we would have got (descr == NULL) above and reset the hint. */ - goto got_hinted_page; /*-----------------------------------------------------------*/ - } else if (xdes_get_state(descr, mtr) == XDES_FREE - && (!init_mtr - || ((reserved - used < reserved / FSEG_FILLFACTOR) - && used >= FSEG_FRAG_LIMIT))) { + } else if ((xdes_get_state(descr, mtr) == XDES_FREE) + && ((reserved - used) < reserved / FSEG_FILLFACTOR) + && (used >= FSEG_FRAG_LIMIT)) { /* 2. We allocate the free extent from space and can take ========================================================= @@ -2683,20 +2645,8 @@ take_hinted_page: /* Try to fill the segment free list */ fseg_fill_free_list(seg_inode, space, zip_size, hint + FSP_EXTENT_SIZE, mtr); - goto take_hinted_page; - /*-----------------------------------------------------------*/ - } else if (!init_mtr) { - ut_a(xdes_get_state(descr, mtr) == XDES_FREE_FRAG); - fsp_alloc_from_free_frag(space_header, descr, - hint % FSP_EXTENT_SIZE, mtr); ret_page = hint; - ret_descr = NULL; - - /* Put the page in the fragment page array of the segment */ - n = fseg_find_free_frag_page_slot(seg_inode, mtr); - ut_a(n != FIL_NULL); - fseg_set_nth_frag_page_no(seg_inode, n, ret_page, mtr); - goto got_hinted_page; + /*-----------------------------------------------------------*/ } else if ((direction != FSP_NO_DIR) && ((reserved - used) < reserved / FSEG_FILLFACTOR) && (used >= FSEG_FRAG_LIMIT) @@ -2755,10 +2705,11 @@ take_hinted_page: } else if (used < FSEG_FRAG_LIMIT) { /* 6. We allocate an individual page from the space ===================================================*/ - ret_page = fsp_alloc_free_page(space, zip_size, hint, - mtr, init_mtr); + ret_page = fsp_alloc_free_page(space, zip_size, hint, mtr); ret_descr = NULL; + frag_page_allocated = TRUE; + if (ret_page != FIL_NULL) { /* Put the page in the fragment page array of the segment */ @@ -2768,10 +2719,6 @@ take_hinted_page: fseg_set_nth_frag_page_no(seg_inode, n, ret_page, mtr); } - - /* fsp_alloc_free_page() invoked fsp_init_file_page() - already. */ - return(ret_page); /*-----------------------------------------------------------*/ } else { /* 7. We allocate a new extent and take its first page @@ -2819,34 +2766,26 @@ take_hinted_page: } } -got_hinted_page: - { + if (!frag_page_allocated) { /* Initialize the allocated page to buffer pool, so that it can be obtained immediately with buf_page_get without need for a disk read */ buf_block_t* block; ulint zip_size = dict_table_flags_to_zip_size( mach_read_from_4(FSP_SPACE_FLAGS + space_header)); - mtr_t* block_mtr = init_mtr ? 
init_mtr : mtr; - block = buf_page_create(space, ret_page, zip_size, block_mtr); + block = buf_page_create(space, ret_page, zip_size, mtr); buf_block_dbg_add_level(block, SYNC_FSP_PAGE); if (UNIV_UNLIKELY(block != buf_page_get(space, zip_size, ret_page, RW_X_LATCH, - block_mtr))) { + mtr))) { ut_error; } - if (init_mtr) { - /* The prior contents of the page should be ignored */ - fsp_init_file_page(block, init_mtr); - } - } + /* The prior contents of the page should be ignored */ + fsp_init_file_page(block, mtr); - /* ret_descr == NULL if the block was allocated from free_frag - (XDES_FREE_FRAG) */ - if (ret_descr != NULL) { /* At this point we know the extent and the page offset. The extent is still in the appropriate list (FSEG_NOT_FULL or FSEG_FREE), and the page is not yet marked as used. */ @@ -2859,6 +2798,8 @@ got_hinted_page: fseg_mark_page_used(seg_inode, space, zip_size, ret_page, mtr); } + buf_reset_check_index_page_at_flush(space, ret_page); + return(ret_page); } @@ -2871,7 +2812,7 @@ UNIV_INTERN ulint fseg_alloc_free_page_general( /*=========================*/ - fseg_header_t* seg_header,/*!< in/out: segment header */ + fseg_header_t* seg_header,/*!< in: segment header */ ulint hint, /*!< in: hint of which page would be desirable */ byte direction,/*!< in: if the new page is needed because of an index page split, and records are @@ -2883,11 +2824,7 @@ fseg_alloc_free_page_general( with fsp_reserve_free_extents, then there is no need to do the check for this individual page */ - mtr_t* mtr, /*!< in/out: mini-transaction handle */ - mtr_t* init_mtr)/*!< in/out: mtr or another mini-transaction - in which the page should be initialized, - or NULL if this is a "fake allocation" of - a page that was previously freed in mtr */ + mtr_t* mtr) /*!< in: mtr handle */ { fseg_inode_t* inode; ulint space; @@ -2929,8 +2866,7 @@ fseg_alloc_free_page_general( } page_no = fseg_alloc_free_page_low(space, zip_size, - inode, hint, direction, - mtr, init_mtr); + inode, hint, direction, mtr); if (!has_done_reservation) { fil_space_release_free_extents(space, n_reserved); } @@ -2938,6 +2874,28 @@ fseg_alloc_free_page_general( return(page_no); } +/**********************************************************************//** +Allocates a single free page from a segment. This function implements +the intelligent allocation strategy which tries to minimize file space +fragmentation. 
+@return allocated page offset, FIL_NULL if no page could be allocated */ +UNIV_INTERN +ulint +fseg_alloc_free_page( +/*=================*/ + fseg_header_t* seg_header,/*!< in: segment header */ + ulint hint, /*!< in: hint of which page would be desirable */ + byte direction,/*!< in: if the new page is needed because + of an index page split, and records are + inserted there in order, into which + direction they go alphabetically: FSP_DOWN, + FSP_UP, FSP_NO_DIR */ + mtr_t* mtr) /*!< in: mtr handle */ +{ + return(fseg_alloc_free_page_general(seg_header, hint, direction, + FALSE, mtr)); +} + /**********************************************************************//** Checks that we have at least 2 frag pages free in the first extent of a single-table tablespace, and they are also physically initialized to the data diff --git a/storage/innobase/include/btr0btr.h b/storage/innobase/include/btr0btr.h index 3dfd98ceb08..71e772388a0 100644 --- a/storage/innobase/include/btr0btr.h +++ b/storage/innobase/include/btr0btr.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1994, 2011, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1994, 2010, Innobase Oy. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -499,14 +499,11 @@ UNIV_INTERN ibool btr_compress( /*=========*/ - btr_cur_t* cursor, /*!< in/out: cursor on the page to merge - or lift; the page must not be empty: - when deleting records, use btr_discard_page() - if the page would become empty */ - ibool adjust, /*!< in: TRUE if should adjust the - cursor position even if compression occurs */ - mtr_t* mtr) /*!< in/out: mini-transaction */ - __attribute__((nonnull)); + btr_cur_t* cursor, /*!< in: cursor on the page to merge or lift; + the page must not be empty: in record delete + use btr_discard_page if the page would become + empty */ + mtr_t* mtr); /*!< in: mtr */ /*************************************************************//** Discards a page from a B-tree. This is used to remove the last record from a B-tree page: the whole page must be removed at the same time. This cannot @@ -568,12 +565,7 @@ btr_page_alloc( page split is made */ ulint level, /*!< in: level where the page is placed in the tree */ - mtr_t* mtr, /*!< in/out: mini-transaction - for the allocation */ - mtr_t* init_mtr) /*!< in/out: mini-transaction - for x-latching and initializing - the page */ - __attribute__((nonnull, warn_unused_result)); + mtr_t* mtr); /*!< in: mtr */ /**************************************************************//** Frees a file page used in an index tree. NOTE: cannot free field external storage pages because the page must contain info on its level. */ @@ -596,33 +588,6 @@ btr_page_free_low( buf_block_t* block, /*!< in: block to be freed, x-latched */ ulint level, /*!< in: page level */ mtr_t* mtr); /*!< in: mtr */ -/**************************************************************//** -Marks all MTR_MEMO_FREE_CLUST_LEAF pages nonfree or free. -For invoking btr_store_big_rec_extern_fields() after an update, -we must temporarily mark freed clustered index pages allocated, so -that off-page columns will not be allocated from them. Between the -btr_store_big_rec_extern_fields() and mtr_commit() we have to -mark the pages free again, so that no pages will be leaked. 
*/ -UNIV_INTERN -void -btr_mark_freed_leaves( -/*==================*/ - dict_index_t* index, /*!< in/out: clustered index */ - mtr_t* mtr, /*!< in/out: mini-transaction */ - ibool nonfree)/*!< in: TRUE=mark nonfree, FALSE=mark freed */ - UNIV_COLD __attribute__((nonnull)); -#ifdef UNIV_DEBUG -/**************************************************************//** -Validates all pages marked MTR_MEMO_FREE_CLUST_LEAF. -@see btr_mark_freed_leaves() -@return TRUE */ -UNIV_INTERN -ibool -btr_freed_leaves_validate( -/*======================*/ - mtr_t* mtr) /*!< in: mini-transaction */ - __attribute__((nonnull, warn_unused_result)); -#endif /* UNIV_DEBUG */ #ifdef UNIV_BTR_PRINT /*************************************************************//** Prints size info of a B-tree. */ diff --git a/storage/innobase/include/btr0cur.h b/storage/innobase/include/btr0cur.h index 26ed766dbd4..be918439f59 100644 --- a/storage/innobase/include/btr0cur.h +++ b/storage/innobase/include/btr0cur.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1994, 2011, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1994, 2010, Innobase Oy. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -36,9 +36,6 @@ Created 10/16/1994 Heikki Tuuri #define BTR_NO_LOCKING_FLAG 2 /* do no record lock checking */ #define BTR_KEEP_SYS_FLAG 4 /* sys fields will be found from the update vector or inserted entry */ -#define BTR_KEEP_POS_FLAG 8 /* btr_cur_pessimistic_update() - must keep cursor position when - moving columns to big_rec */ #ifndef UNIV_HOTBACKUP #include "que0types.h" @@ -313,9 +310,7 @@ btr_cur_pessimistic_update( /*=======================*/ ulint flags, /*!< in: undo logging, locking, and rollback flags */ - btr_cur_t* cursor, /*!< in/out: cursor on the record to update; - cursor may become invalid if *big_rec == NULL - || !(flags & BTR_KEEP_POS_FLAG) */ + btr_cur_t* cursor, /*!< in: cursor on the record to update */ mem_heap_t** heap, /*!< in/out: pointer to memory heap, or NULL */ big_rec_t** big_rec,/*!< out: big rec vector whose fields have to be stored externally by the caller, or NULL */ @@ -369,13 +364,10 @@ UNIV_INTERN ibool btr_cur_compress_if_useful( /*=======================*/ - btr_cur_t* cursor, /*!< in/out: cursor on the page to compress; + btr_cur_t* cursor, /*!< in: cursor on the page to compress; cursor does not stay valid if compression occurs */ - ibool adjust, /*!< in: TRUE if should adjust the - cursor position even if compression occurs */ - mtr_t* mtr) /*!< in/out: mini-transaction */ - __attribute__((nonnull)); + mtr_t* mtr); /*!< in: mtr */ /*******************************************************//** Removes the record on which the tree cursor is positioned. It is assumed that the mtr has an x-latch on the page where the cursor is positioned, @@ -518,8 +510,6 @@ btr_store_big_rec_extern_fields_func( the "external storage" flags in offsets will not correspond to rec when this function returns */ - const big_rec_t*big_rec_vec, /*!< in: vector containing fields - to be stored externally */ #ifdef UNIV_DEBUG mtr_t* local_mtr, /*!< in: mtr containing the latch to rec and to the tree */ @@ -528,12 +518,9 @@ btr_store_big_rec_extern_fields_func( ibool update_in_place,/*! 
in: TRUE if the record is updated in place (not delete+insert) */ #endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */ - mtr_t* alloc_mtr) /*!< in/out: in an insert, NULL; - in an update, local_mtr for - allocating BLOB pages and - updating BLOB pointers; alloc_mtr - must not have freed any leaf pages */ - __attribute__((nonnull(1,2,3,4,5), warn_unused_result)); + const big_rec_t*big_rec_vec) /*!< in: vector containing fields + to be stored externally */ + __attribute__((nonnull)); /** Stores the fields in big_rec_vec to the tablespace and puts pointers to them in rec. The extern flags in rec will have to be set beforehand. @@ -542,22 +529,21 @@ file segment of the index tree. @param index in: clustered index; MUST be X-latched by mtr @param b in/out: block containing rec; MUST be X-latched by mtr @param rec in/out: clustered index record -@param offs in: rec_get_offsets(rec, index); +@param offsets in: rec_get_offsets(rec, index); the "external storage" flags in offsets will not be adjusted -@param big in: vector containing fields to be stored externally @param mtr in: mini-transaction that holds x-latch on index and b @param upd in: TRUE if the record is updated in place (not delete+insert) -@param rmtr in/out: in updates, the mini-transaction that holds rec +@param big in: vector containing fields to be stored externally @return DB_SUCCESS or DB_OUT_OF_FILE_SPACE */ #ifdef UNIV_DEBUG -# define btr_store_big_rec_extern_fields(index,b,rec,offs,big,mtr,upd,rmtr) \ - btr_store_big_rec_extern_fields_func(index,b,rec,offs,big,mtr,upd,rmtr) +# define btr_store_big_rec_extern_fields(index,b,rec,offsets,mtr,upd,big) \ + btr_store_big_rec_extern_fields_func(index,b,rec,offsets,mtr,upd,big) #elif defined UNIV_BLOB_LIGHT_DEBUG -# define btr_store_big_rec_extern_fields(index,b,rec,offs,big,mtr,upd,rmtr) \ - btr_store_big_rec_extern_fields_func(index,b,rec,offs,big,upd,rmtr) +# define btr_store_big_rec_extern_fields(index,b,rec,offsets,mtr,upd,big) \ + btr_store_big_rec_extern_fields_func(index,b,rec,offsets,upd,big) #else -# define btr_store_big_rec_extern_fields(index,b,rec,offs,big,mtr,upd,rmtr) \ - btr_store_big_rec_extern_fields_func(index,b,rec,offs,big,rmtr) +# define btr_store_big_rec_extern_fields(index,b,rec,offsets,mtr,upd,big) \ + btr_store_big_rec_extern_fields_func(index,b,rec,offsets,big) #endif /*******************************************************************//** diff --git a/storage/innobase/include/btr0cur.ic b/storage/innobase/include/btr0cur.ic index c833b3e8572..280583f6ccf 100644 --- a/storage/innobase/include/btr0cur.ic +++ b/storage/innobase/include/btr0cur.ic @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1994, 2011, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1994, 2009, Innobase Oy. All Rights Reserved. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -139,7 +139,7 @@ btr_cur_compress_recommendation( btr_cur_t* cursor, /*!< in: btr cursor */ mtr_t* mtr) /*!< in: mtr */ { - const page_t* page; + page_t* page; ut_ad(mtr_memo_contains(mtr, btr_cur_get_block(cursor), MTR_MEMO_PAGE_X_FIX)); diff --git a/storage/innobase/include/buf0buf.h b/storage/innobase/include/buf0buf.h index 03b80fce2e8..ccebb69a4fe 100644 --- a/storage/innobase/include/buf0buf.h +++ b/storage/innobase/include/buf0buf.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1995, 2011, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1995, 2010, Innobase Oy. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -491,6 +491,15 @@ buf_page_peek( /*==========*/ ulint space, /*!< in: space id */ ulint offset);/*!< in: page number */ +/********************************************************************//** +Resets the check_index_page_at_flush field of a page if found in the buffer +pool. */ +UNIV_INTERN +void +buf_reset_check_index_page_at_flush( +/*================================*/ + ulint space, /*!< in: space id */ + ulint offset);/*!< in: page number */ #if defined UNIV_DEBUG_FILE_ACCESSES || defined UNIV_DEBUG /********************************************************************//** Sets file_page_was_freed TRUE if the page is found in the buffer pool. @@ -600,31 +609,6 @@ buf_block_get_modify_clock( #else /* !UNIV_HOTBACKUP */ # define buf_block_modify_clock_inc(block) ((void) 0) #endif /* !UNIV_HOTBACKUP */ -/*******************************************************************//** -Increments the bufferfix count. */ -UNIV_INLINE -void -buf_block_buf_fix_inc_func( -/*=======================*/ -#ifdef UNIV_SYNC_DEBUG - const char* file, /*!< in: file name */ - ulint line, /*!< in: line */ -#endif /* UNIV_SYNC_DEBUG */ - buf_block_t* block) /*!< in/out: block to bufferfix */ - __attribute__((nonnull)); -#ifdef UNIV_SYNC_DEBUG -/** Increments the bufferfix count. -@param b in/out: block to bufferfix -@param f in: file name where requested -@param l in: line number where requested */ -# define buf_block_buf_fix_inc(b,f,l) buf_block_buf_fix_inc_func(f,l,b) -#else /* UNIV_SYNC_DEBUG */ -/** Increments the bufferfix count. -@param b in/out: block to bufferfix -@param f in: file name where requested -@param l in: line number where requested */ -# define buf_block_buf_fix_inc(b,f,l) buf_block_buf_fix_inc_func(b) -#endif /* UNIV_SYNC_DEBUG */ /********************************************************************//** Calculates a page checksum which is stored to the page when it is written to a file. Note that we must be careful to calculate the same value diff --git a/storage/innobase/include/buf0buf.ic b/storage/innobase/include/buf0buf.ic index 0e80ce55e57..b65b5133c15 100644 --- a/storage/innobase/include/buf0buf.ic +++ b/storage/innobase/include/buf0buf.ic @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1995, 2011, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1995, 2010, Innobase Oy. All Rights Reserved. Copyright (c) 2008, Google Inc. 
Portions of this file contain modifications contributed and copyrighted by @@ -944,6 +944,19 @@ buf_block_buf_fix_inc_func( block->page.buf_fix_count++; } +#ifdef UNIV_SYNC_DEBUG +/** Increments the bufferfix count. +@param b in/out: block to bufferfix +@param f in: file name where requested +@param l in: line number where requested */ +# define buf_block_buf_fix_inc(b,f,l) buf_block_buf_fix_inc_func(f,l,b) +#else /* UNIV_SYNC_DEBUG */ +/** Increments the bufferfix count. +@param b in/out: block to bufferfix +@param f in: file name where requested +@param l in: line number where requested */ +# define buf_block_buf_fix_inc(b,f,l) buf_block_buf_fix_inc_func(b) +#endif /* UNIV_SYNC_DEBUG */ /*******************************************************************//** Decrements the bufferfix count. */ @@ -1194,7 +1207,7 @@ buf_block_dbg_add_level( where we have acquired latch */ ulint level) /*!< in: latching order level */ { - sync_thread_add_level(&block->lock, level, FALSE); + sync_thread_add_level(&block->lock, level); } #endif /* UNIV_SYNC_DEBUG */ /********************************************************************//** diff --git a/storage/innobase/include/fsp0fsp.h b/storage/innobase/include/fsp0fsp.h index 2221380c9a2..7abd3914eda 100644 --- a/storage/innobase/include/fsp0fsp.h +++ b/storage/innobase/include/fsp0fsp.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1995, 2011, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1995, 2009, Innobase Oy. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -176,18 +176,19 @@ fseg_n_reserved_pages( Allocates a single free page from a segment. This function implements the intelligent allocation strategy which tries to minimize file space fragmentation. -@param[in/out] seg_header segment header -@param[in] hint hint of which page would be desirable -@param[in] direction if the new page is needed because +@return the allocated page offset FIL_NULL if no page could be allocated */ +UNIV_INTERN +ulint +fseg_alloc_free_page( +/*=================*/ + fseg_header_t* seg_header, /*!< in: segment header */ + ulint hint, /*!< in: hint of which page would be desirable */ + byte direction, /*!< in: if the new page is needed because of an index page split, and records are inserted there in order, into which direction they go alphabetically: FSP_DOWN, - FSP_UP, FSP_NO_DIR -@param[in/out] mtr mini-transaction -@return the allocated page offset FIL_NULL if no page could be allocated */ -#define fseg_alloc_free_page(seg_header, hint, direction, mtr) \ - fseg_alloc_free_page_general(seg_header, hint, direction, \ - FALSE, mtr, mtr) + FSP_UP, FSP_NO_DIR */ + mtr_t* mtr); /*!< in: mtr handle */ /**********************************************************************//** Allocates a single free page from a segment. 
This function implements the intelligent allocation strategy which tries to minimize file space @@ -197,7 +198,7 @@ UNIV_INTERN ulint fseg_alloc_free_page_general( /*=========================*/ - fseg_header_t* seg_header,/*!< in/out: segment header */ + fseg_header_t* seg_header,/*!< in: segment header */ ulint hint, /*!< in: hint of which page would be desirable */ byte direction,/*!< in: if the new page is needed because of an index page split, and records are @@ -209,12 +210,7 @@ fseg_alloc_free_page_general( with fsp_reserve_free_extents, then there is no need to do the check for this individual page */ - mtr_t* mtr, /*!< in/out: mini-transaction */ - mtr_t* init_mtr)/*!< in/out: mtr or another mini-transaction - in which the page should be initialized, - or NULL if this is a "fake allocation" of - a page that was previously freed in mtr */ - __attribute__((warn_unused_result, nonnull(1,5))); + mtr_t* mtr); /*!< in: mtr handle */ /**********************************************************************//** Reserves free pages from a tablespace. All mini-transactions which may use several pages from the tablespace should call this function beforehand diff --git a/storage/innobase/include/mtr0mtr.h b/storage/innobase/include/mtr0mtr.h index 185a0953231..7f608546cc2 100644 --- a/storage/innobase/include/mtr0mtr.h +++ b/storage/innobase/include/mtr0mtr.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1995, 2011, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1995, 2009, Innobase Oy. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -53,8 +53,6 @@ first 3 values must be RW_S_LATCH, RW_X_LATCH, RW_NO_LATCH */ #define MTR_MEMO_MODIFY 54 #define MTR_MEMO_S_LOCK 55 #define MTR_MEMO_X_LOCK 56 -/** The mini-transaction freed a clustered index leaf page. */ -#define MTR_MEMO_FREE_CLUST_LEAF 57 /** @name Log item types The log items are declared 'byte' so that the compiler can warn if val @@ -370,14 +368,11 @@ struct mtr_struct{ #endif dyn_array_t memo; /*!< memo stack for locks etc. */ dyn_array_t log; /*!< mini-transaction log */ - unsigned inside_ibuf:1; + ibool inside_ibuf; /*!< TRUE if inside ibuf changes */ - unsigned modifications:1; - /*!< TRUE if the mini-transaction - modified buffer pool pages */ - unsigned freed_clust_leaf:1; - /*!< TRUE if MTR_MEMO_FREE_CLUST_LEAF - was logged in the mini-transaction */ + ibool modifications; + /* TRUE if the mtr made modifications to + buffer pool pages */ ulint n_log_recs; /* count of how many page initial log records have been written to the mtr log */ diff --git a/storage/innobase/include/mtr0mtr.ic b/storage/innobase/include/mtr0mtr.ic index 3541f359338..1db4a4bd735 100644 --- a/storage/innobase/include/mtr0mtr.ic +++ b/storage/innobase/include/mtr0mtr.ic @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1995, 2011, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1995, 2010, Innobase Oy. All Rights Reserved. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -43,9 +43,8 @@ mtr_start( dyn_array_create(&(mtr->log)); mtr->log_mode = MTR_LOG_ALL; - mtr->inside_ibuf = FALSE; mtr->modifications = FALSE; - mtr->freed_clust_leaf = FALSE; + mtr->inside_ibuf = FALSE; mtr->n_log_recs = 0; ut_d(mtr->state = MTR_ACTIVE); @@ -67,8 +66,7 @@ mtr_memo_push( ut_ad(object); ut_ad(type >= MTR_MEMO_PAGE_S_FIX); - ut_ad(type <= MTR_MEMO_FREE_CLUST_LEAF); - ut_ad(type != MTR_MEMO_FREE_CLUST_LEAF || mtr->freed_clust_leaf); + ut_ad(type <= MTR_MEMO_X_LOCK); ut_ad(mtr); ut_ad(mtr->magic_n == MTR_MAGIC_N); ut_ad(mtr->state == MTR_ACTIVE); diff --git a/storage/innobase/include/page0cur.ic b/storage/innobase/include/page0cur.ic index 81474fa35f5..3520677dfb3 100644 --- a/storage/innobase/include/page0cur.ic +++ b/storage/innobase/include/page0cur.ic @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1994, 2011, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1994, 2009, Innobase Oy. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -27,8 +27,6 @@ Created 10/4/1994 Heikki Tuuri #include "buf0types.h" #ifdef UNIV_DEBUG -# include "rem0cmp.h" - /*********************************************************//** Gets pointer to the page frame where the cursor is positioned. @return page */ @@ -270,7 +268,6 @@ page_cur_tuple_insert( index, rec, offsets, mtr); } - ut_ad(!rec || !cmp_dtuple_rec(tuple, rec, offsets)); mem_heap_free(heap); return(rec); } diff --git a/storage/innobase/include/page0page.h b/storage/innobase/include/page0page.h index ad1445b3935..6a82e820312 100644 --- a/storage/innobase/include/page0page.h +++ b/storage/innobase/include/page0page.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1994, 2011, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1994, 2009, Innobase Oy. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -281,42 +281,16 @@ page_get_supremum_offset( const page_t* page); /*!< in: page which must have record(s) */ #define page_get_infimum_rec(page) ((page) + page_get_infimum_offset(page)) #define page_get_supremum_rec(page) ((page) + page_get_supremum_offset(page)) - /************************************************************//** -Returns the nth record of the record list. -This is the inverse function of page_rec_get_n_recs_before(). -@return nth record */ -UNIV_INTERN -const rec_t* -page_rec_get_nth_const( -/*===================*/ - const page_t* page, /*!< in: page */ - ulint nth) /*!< in: nth record */ - __attribute__((nonnull, warn_unused_result)); -/************************************************************//** -Returns the nth record of the record list. -This is the inverse function of page_rec_get_n_recs_before(). -@return nth record */ -UNIV_INLINE -rec_t* -page_rec_get_nth( -/*=============*/ - page_t* page, /*< in: page */ - ulint nth) /*!< in: nth record */ - __attribute__((nonnull, warn_unused_result)); - -#ifndef UNIV_HOTBACKUP -/************************************************************//** -Returns the middle record of the records on the page. 
If there is an -even number of records in the list, returns the first record of the -upper half-list. +Returns the middle record of record list. If there are an even number +of records in the list, returns the first record of upper half-list. @return middle record */ -UNIV_INLINE +UNIV_INTERN rec_t* page_get_middle_rec( /*================*/ - page_t* page) /*!< in: page */ - __attribute__((nonnull, warn_unused_result)); + page_t* page); /*!< in: page */ +#ifndef UNIV_HOTBACKUP /*************************************************************//** Compares a data tuple to a physical record. Differs from the function cmp_dtuple_rec_with_match in the way that the record must reside on an @@ -371,7 +345,6 @@ page_get_n_recs( /***************************************************************//** Returns the number of records before the given record in chain. The number includes infimum and supremum records. -This is the inverse function of page_rec_get_nth(). @return number of records */ UNIV_INTERN ulint diff --git a/storage/innobase/include/page0page.ic b/storage/innobase/include/page0page.ic index c1a0ce73982..115cee64f8b 100644 --- a/storage/innobase/include/page0page.ic +++ b/storage/innobase/include/page0page.ic @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1994, 2011, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1994, 2009, Innobase Oy. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -419,37 +419,7 @@ page_rec_is_infimum( return(page_rec_is_infimum_low(page_offset(rec))); } -/************************************************************//** -Returns the nth record of the record list. -This is the inverse function of page_rec_get_n_recs_before(). -@return nth record */ -UNIV_INLINE -rec_t* -page_rec_get_nth( -/*=============*/ - page_t* page, /*!< in: page */ - ulint nth) /*!< in: nth record */ -{ - return((rec_t*) page_rec_get_nth_const(page, nth)); -} - #ifndef UNIV_HOTBACKUP -/************************************************************//** -Returns the middle record of the records on the page. If there is an -even number of records in the list, returns the first record of the -upper half-list. -@return middle record */ -UNIV_INLINE -rec_t* -page_get_middle_rec( -/*================*/ - page_t* page) /*!< in: page */ -{ - ulint middle = (page_get_n_recs(page) + PAGE_HEAP_NO_USER_LOW) / 2; - - return(page_rec_get_nth(page, middle)); -} - /*************************************************************//** Compares a data tuple to a physical record. Differs from the function cmp_dtuple_rec_with_match in the way that the record must reside on an diff --git a/storage/innobase/include/rem0rec.h b/storage/innobase/include/rem0rec.h index 46914d13c7f..10b74d18c13 100644 --- a/storage/innobase/include/rem0rec.h +++ b/storage/innobase/include/rem0rec.h @@ -480,7 +480,7 @@ ulint rec_offs_any_extern( /*================*/ const ulint* offsets);/*!< in: array returned by rec_get_offsets() */ -#if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG +#ifdef UNIV_BLOB_NULL_DEBUG /******************************************************//** Determine if the offsets are for a record containing null BLOB pointers. 
@return first field containing a null BLOB pointer, or NULL if none found */ @@ -491,7 +491,7 @@ rec_offs_any_null_extern( const rec_t* rec, /*!< in: record */ const ulint* offsets) /*!< in: rec_get_offsets(rec) */ __attribute__((nonnull, warn_unused_result)); -#endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */ +#endif /* UNIV_BLOB_NULL_DEBUG */ /******************************************************//** Returns nonzero if the extern bit is set in nth field of rec. @return nonzero if externally stored */ diff --git a/storage/innobase/include/rem0rec.ic b/storage/innobase/include/rem0rec.ic index ebda6105bf1..dc8ed515c30 100644 --- a/storage/innobase/include/rem0rec.ic +++ b/storage/innobase/include/rem0rec.ic @@ -1088,7 +1088,7 @@ rec_offs_any_extern( return(UNIV_UNLIKELY(*rec_offs_base(offsets) & REC_OFFS_EXTERNAL)); } -#if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG +#ifdef UNIV_BLOB_NULL_DEBUG /******************************************************//** Determine if the offsets are for a record containing null BLOB pointers. @return first field containing a null BLOB pointer, or NULL if none found */ @@ -1124,7 +1124,7 @@ rec_offs_any_null_extern( return(NULL); } -#endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */ +#endif /* UNIV_BLOB_NULL_DEBUG */ /******************************************************//** Returns nonzero if the extern bit is set in nth field of rec. diff --git a/storage/innobase/include/sync0rw.ic b/storage/innobase/include/sync0rw.ic index 5d15677ccce..2ffd9fdafb5 100644 --- a/storage/innobase/include/sync0rw.ic +++ b/storage/innobase/include/sync0rw.ic @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1995, 2011, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1995, 2009, Innobase Oy. All Rights Reserved. Copyright (c) 2008, Google Inc. Portions of this file contain modifications contributed and copyrighted by @@ -603,16 +603,16 @@ rw_lock_x_unlock_direct( ut_ad((lock->lock_word % X_LOCK_DECR) == 0); +#ifdef UNIV_SYNC_DEBUG + rw_lock_remove_debug_info(lock, 0, RW_LOCK_EX); +#endif + if (lock->lock_word == 0) { lock->recursive = FALSE; UNIV_MEM_INVALID(&lock->writer_thread, sizeof lock->writer_thread); } -#ifdef UNIV_SYNC_DEBUG - rw_lock_remove_debug_info(lock, 0, RW_LOCK_EX); -#endif - lock->lock_word += X_LOCK_DECR; ut_ad(!lock->waiters); diff --git a/storage/innobase/include/sync0sync.h b/storage/innobase/include/sync0sync.h index f6b8897522f..d9dea0aa63d 100644 --- a/storage/innobase/include/sync0sync.h +++ b/storage/innobase/include/sync0sync.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1995, 2011, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1995, 2011, Innobase Oy. All Rights Reserved. Copyright (c) 2008, Google Inc. Portions of this file contain modifications contributed and copyrighted by @@ -400,10 +400,8 @@ void sync_thread_add_level( /*==================*/ void* latch, /*!< in: pointer to a mutex or an rw-lock */ - ulint level, /*!< in: level in the latching order; if + ulint level); /*!< in: level in the latching order; if SYNC_LEVEL_VARYING, nothing is done */ - ibool relock) /*!< in: TRUE if re-entering an x-lock */ - __attribute__((nonnull)); /******************************************************************//** Removes a latch from the thread level array if it is found there. 
@return TRUE if found in the array; it is no error if the latch is diff --git a/storage/innobase/include/univ.i b/storage/innobase/include/univ.i index cb175c2c234..e86cd4402bf 100644 --- a/storage/innobase/include/univ.i +++ b/storage/innobase/include/univ.i @@ -1,7 +1,8 @@ /***************************************************************************** -Copyright (c) 1994, 2011, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1994, 2010, Innobase Oy. All Rights Reserved. Copyright (c) 2008, Google Inc. +Copyright (c) 2009, Sun Microsystems, Inc. Portions of this file contain modifications contributed and copyrighted by Google, Inc. Those modifications are gratefully acknowledged and are described @@ -185,6 +186,8 @@ command. Not tested on Windows. */ debugging without UNIV_DEBUG */ #define UNIV_BLOB_LIGHT_DEBUG /* Enable off-page column debugging without UNIV_DEBUG */ +#define UNIV_BLOB_NULL_DEBUG /* Enable deep off-page + column debugging */ #define UNIV_DEBUG /* Enable ut_ad() assertions and disable UNIV_INLINE */ #define UNIV_DEBUG_LOCK_VALIDATE /* Enable diff --git a/storage/innobase/mtr/mtr0mtr.c b/storage/innobase/mtr/mtr0mtr.c index fde87cb3cd3..08234609ff0 100644 --- a/storage/innobase/mtr/mtr0mtr.c +++ b/storage/innobase/mtr/mtr0mtr.c @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1995, 2011, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1995, 2009, Innobase Oy. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -64,11 +64,12 @@ mtr_memo_slot_release( buf_page_release((buf_block_t*)object, type); } else if (type == MTR_MEMO_S_LOCK) { rw_lock_s_unlock((rw_lock_t*)object); +#ifdef UNIV_DEBUG } else if (type != MTR_MEMO_X_LOCK) { - ut_ad(type == MTR_MEMO_MODIFY - || type == MTR_MEMO_FREE_CLUST_LEAF); + ut_ad(type == MTR_MEMO_MODIFY); ut_ad(mtr_memo_contains(mtr, object, MTR_MEMO_PAGE_X_FIX)); +#endif /* UNIV_DEBUG */ } else { rw_lock_x_unlock((rw_lock_t*)object); } diff --git a/storage/innobase/page/page0cur.c b/storage/innobase/page/page0cur.c index b8c492328e8..936762b986a 100644 --- a/storage/innobase/page/page0cur.c +++ b/storage/innobase/page/page0cur.c @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1994, 2011, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1994, 2009, Innobase Oy. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -1180,15 +1180,14 @@ page_cur_insert_rec_zip_reorg( /* Before trying to reorganize the page, store the number of preceding records on the page. */ pos = page_rec_get_n_recs_before(rec); - ut_ad(pos > 0); if (page_zip_reorganize(block, index, mtr)) { /* The page was reorganized: Find rec by seeking to pos, and update *current_rec. 
*/ - if (pos > 1) { - rec = page_rec_get_nth(page, pos - 1); - } else { - rec = page + PAGE_NEW_INFIMUM; + rec = page + PAGE_NEW_INFIMUM; + + while (--pos) { + rec = page + rec_get_next_offs(rec, TRUE); } *current_rec = rec; @@ -1284,12 +1283,6 @@ page_cur_insert_rec_zip( insert_rec = page_cur_insert_rec_zip_reorg( current_rec, block, index, insert_rec, page, page_zip, mtr); -#ifdef UNIV_DEBUG - if (insert_rec) { - rec_offs_make_valid( - insert_rec, index, offsets); - } -#endif /* UNIV_DEBUG */ } return(insert_rec); diff --git a/storage/innobase/page/page0page.c b/storage/innobase/page/page0page.c index 1c74a1d5cab..6064d028ae1 100644 --- a/storage/innobase/page/page0page.c +++ b/storage/innobase/page/page0page.c @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1994, 2011, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1994, 2010, Innobase Oy. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -1465,54 +1465,55 @@ page_dir_balance_slot( } } +#ifndef UNIV_HOTBACKUP /************************************************************//** -Returns the nth record of the record list. -This is the inverse function of page_rec_get_n_recs_before(). -@return nth record */ +Returns the middle record of the record list. If there are an even number +of records in the list, returns the first record of the upper half-list. +@return middle record */ UNIV_INTERN -const rec_t* -page_rec_get_nth_const( -/*===================*/ - const page_t* page, /*!< in: page */ - ulint nth) /*!< in: nth record */ +rec_t* +page_get_middle_rec( +/*================*/ + page_t* page) /*!< in: page */ { - const page_dir_slot_t* slot; + page_dir_slot_t* slot; + ulint middle; ulint i; ulint n_owned; - const rec_t* rec; + ulint count; + rec_t* rec; - ut_ad(nth < UNIV_PAGE_SIZE / (REC_N_NEW_EXTRA_BYTES + 1)); + /* This many records we must leave behind */ + middle = (page_get_n_recs(page) + PAGE_HEAP_NO_USER_LOW) / 2; + + count = 0; for (i = 0;; i++) { slot = page_dir_get_nth_slot(page, i); n_owned = page_dir_slot_get_n_owned(slot); - if (n_owned > nth) { + if (count + n_owned > middle) { break; } else { - nth -= n_owned; + count += n_owned; } } ut_ad(i > 0); slot = page_dir_get_nth_slot(page, i - 1); - rec = page_dir_slot_get_rec(slot); + rec = (rec_t*) page_dir_slot_get_rec(slot); + rec = page_rec_get_next(rec); - if (page_is_comp(page)) { - do { - rec = page_rec_get_next_low(rec, TRUE); - ut_ad(rec); - } while (nth--); - } else { - do { - rec = page_rec_get_next_low(rec, FALSE); - ut_ad(rec); - } while (nth--); + /* There are now count records behind rec */ + + for (i = 0; i < middle - count; i++) { + rec = page_rec_get_next(rec); } return(rec); } +#endif /* !UNIV_HOTBACKUP */ /***************************************************************//** Returns the number of records before the given record in chain. @@ -1574,7 +1575,6 @@ page_rec_get_n_recs_before( n--; ut_ad(n >= 0); - ut_ad(n < UNIV_PAGE_SIZE / (REC_N_NEW_EXTRA_BYTES + 1)); return((ulint) n); } diff --git a/storage/innobase/row/row0ins.c b/storage/innobase/row/row0ins.c index 5f10a763fc4..2925feb2904 100644 --- a/storage/innobase/row/row0ins.c +++ b/storage/innobase/row/row0ins.c @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1996, 2011, Oracle and/or its affiliates. All Rights Reserved. 
+Copyright (c) 1996, 2010, Innobase Oy. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -348,9 +348,9 @@ row_ins_clust_index_entry_by_modify( return(DB_LOCK_TABLE_FULL); } - err = btr_cur_pessimistic_update( - BTR_KEEP_POS_FLAG, cursor, heap, big_rec, update, - 0, thr, mtr); + err = btr_cur_pessimistic_update(0, cursor, + heap, big_rec, update, + 0, thr, mtr); } return(err); @@ -1976,7 +1976,6 @@ row_ins_index_entry_low( ulint modify = 0; /* remove warning */ rec_t* insert_rec; rec_t* rec; - ulint* offsets; ulint err; ulint n_unique; big_rec_t* big_rec = NULL; @@ -2084,51 +2083,6 @@ row_ins_index_entry_low( err = row_ins_clust_index_entry_by_modify( mode, &cursor, &heap, &big_rec, entry, thr, &mtr); - - if (big_rec) { - ut_a(err == DB_SUCCESS); - /* Write out the externally stored - columns, but allocate the pages and - write the pointers using the - mini-transaction of the record update. - If any pages were freed in the update, - temporarily mark them allocated so - that off-page columns will not - overwrite them. We must do this, - because we will write the redo log for - the BLOB writes before writing the - redo log for the record update. Thus, - redo log application at crash recovery - will see BLOBs being written to free pages. */ - - btr_mark_freed_leaves(index, &mtr, TRUE); - - rec = btr_cur_get_rec(&cursor); - offsets = rec_get_offsets( - rec, index, NULL, - ULINT_UNDEFINED, &heap); - - err = btr_store_big_rec_extern_fields( - index, btr_cur_get_block(&cursor), - rec, offsets, big_rec, &mtr, - FALSE, &mtr); - /* If writing big_rec fails (for - example, because of DB_OUT_OF_FILE_SPACE), - the record will be corrupted. Even if - we did not update any externally - stored columns, our update could cause - the record to grow so that a - non-updated column was selected for - external storage. This non-update - would not have been written to the - undo log, and thus the record cannot - be rolled back. */ - ut_a(err == DB_SUCCESS); - /* Free the pages again - in order to avoid a leak. 
*/ - btr_mark_freed_leaves(index, &mtr, FALSE); - goto stored_big_rec; - } } else { ut_ad(!n_ext); err = row_ins_sec_index_entry_by_modify( @@ -2157,6 +2111,8 @@ function_exit: mtr_commit(&mtr); if (UNIV_LIKELY_NULL(big_rec)) { + rec_t* rec; + ulint* offsets; mtr_start(&mtr); btr_cur_search_to_nth_level(index, 0, entry, PAGE_CUR_LE, @@ -2168,9 +2124,8 @@ function_exit: err = btr_store_big_rec_extern_fields( index, btr_cur_get_block(&cursor), - rec, offsets, big_rec, &mtr, FALSE, NULL); + rec, offsets, &mtr, FALSE, big_rec); -stored_big_rec: if (modify) { dtuple_big_rec_free(big_rec); } else { @@ -2443,7 +2398,7 @@ row_ins( node->index = dict_table_get_next_index(node->index); node->entry = UT_LIST_GET_NEXT(tuple_list, node->entry); - /* Skip corrupted secondary index and its entry */ + /* Skip corrupted secondar index and its entry */ while (node->index && dict_index_is_corrupted(node->index)) { node->index = dict_table_get_next_index(node->index); diff --git a/storage/innobase/row/row0row.c b/storage/innobase/row/row0row.c index 8892c8dc289..20fd475343e 100644 --- a/storage/innobase/row/row0row.c +++ b/storage/innobase/row/row0row.c @@ -233,7 +233,6 @@ row_build( ut_ad(index && rec && heap); ut_ad(dict_index_is_clust(index)); - ut_ad(!mutex_own(&kernel_mutex)); if (!offsets) { offsets = rec_get_offsets(rec, index, offsets_, @@ -242,22 +241,13 @@ row_build( ut_ad(rec_offs_validate(rec, index, offsets)); } -#if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG - if (rec_offs_any_null_extern(rec, offsets)) { - /* This condition can occur during crash recovery - before trx_rollback_active() has completed execution. - - This condition is possible if the server crashed - during an insert or update-by-delete-and-insert before - btr_store_big_rec_extern_fields() did mtr_commit() all - BLOB pointers to the freshly inserted clustered index - record. */ - ut_a(trx_assert_recovered( - row_get_rec_trx_id(rec, index, offsets))); - ut_a(trx_undo_roll_ptr_is_insert( - row_get_rec_roll_ptr(rec, index, offsets))); - } -#endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */ +#if 0 && defined UNIV_BLOB_NULL_DEBUG + /* This one can fail in trx_rollback_active() if + the server crashed during an insert before the + btr_store_big_rec_extern_fields() did mtr_commit() + all BLOB pointers to the clustered index record. */ + ut_a(!rec_offs_any_null_extern(rec, offsets)); +#endif /* 0 && UNIV_BLOB_NULL_DEBUG */ if (type != ROW_COPY_POINTERS) { /* Take a copy of rec to heap */ @@ -442,10 +432,10 @@ row_rec_to_index_entry( rec = rec_copy(buf, rec, offsets); /* Avoid a debug assertion in rec_offs_validate(). */ rec_offs_make_valid(rec, index, offsets); -#if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG +#ifdef UNIV_BLOB_NULL_DEBUG } else { ut_a(!rec_offs_any_null_extern(rec, offsets)); -#endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */ +#endif /* UNIV_BLOB_NULL_DEBUG */ } entry = row_rec_to_index_entry_low(rec, index, offsets, n_ext, heap); diff --git a/storage/innobase/row/row0upd.c b/storage/innobase/row/row0upd.c index 2401fc88553..765233c8dab 100644 --- a/storage/innobase/row/row0upd.c +++ b/storage/innobase/row/row0upd.c @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1996, 2011, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1996, 2010, Innobase Oy. All Rights Reserved. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -1999,46 +1999,28 @@ row_upd_clust_rec( ut_ad(!rec_get_deleted_flag(btr_pcur_get_rec(pcur), dict_table_is_comp(index->table))); - err = btr_cur_pessimistic_update( - BTR_NO_LOCKING_FLAG | BTR_KEEP_POS_FLAG, btr_cur, - &heap, &big_rec, node->update, node->cmpl_info, thr, mtr); - if (big_rec) { + err = btr_cur_pessimistic_update(BTR_NO_LOCKING_FLAG, btr_cur, + &heap, &big_rec, node->update, + node->cmpl_info, thr, mtr); + mtr_commit(mtr); + + if (err == DB_SUCCESS && big_rec) { ulint offsets_[REC_OFFS_NORMAL_SIZE]; rec_t* rec; rec_offs_init(offsets_); - ut_a(err == DB_SUCCESS); - /* Write out the externally stored columns, but - allocate the pages and write the pointers using the - mini-transaction of the record update. If any pages - were freed in the update, temporarily mark them - allocated so that off-page columns will not overwrite - them. We must do this, because we write the redo log - for the BLOB writes before writing the redo log for - the record update. */ + mtr_start(mtr); - btr_mark_freed_leaves(index, mtr, TRUE); + ut_a(btr_pcur_restore_position(BTR_MODIFY_TREE, pcur, mtr)); rec = btr_cur_get_rec(btr_cur); err = btr_store_big_rec_extern_fields( index, btr_cur_get_block(btr_cur), rec, rec_get_offsets(rec, index, offsets_, ULINT_UNDEFINED, &heap), - big_rec, mtr, TRUE, mtr); - /* If writing big_rec fails (for example, because of - DB_OUT_OF_FILE_SPACE), the record will be corrupted. - Even if we did not update any externally stored - columns, our update could cause the record to grow so - that a non-updated column was selected for external - storage. This non-update would not have been written - to the undo log, and thus the record cannot be rolled - back. */ - ut_a(err == DB_SUCCESS); - /* Free the pages again in order to avoid a leak. */ - btr_mark_freed_leaves(index, mtr, FALSE); + mtr, TRUE, big_rec); + mtr_commit(mtr); } - mtr_commit(mtr); - if (UNIV_LIKELY_NULL(heap)) { mem_heap_free(heap); } diff --git a/storage/innobase/row/row0vers.c b/storage/innobase/row/row0vers.c index abd065ce1ff..5fd7d082194 100644 --- a/storage/innobase/row/row0vers.c +++ b/storage/innobase/row/row0vers.c @@ -550,10 +550,10 @@ row_vers_build_for_consistent_read( /* The view already sees this version: we can copy it to in_heap and return */ -#if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG +#ifdef UNIV_BLOB_NULL_DEBUG ut_a(!rec_offs_any_null_extern( version, *offsets)); -#endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */ +#endif /* UNIV_BLOB_NULL_DEBUG */ buf = mem_heap_alloc(in_heap, rec_offs_size(*offsets)); @@ -588,9 +588,9 @@ row_vers_build_for_consistent_read( *offsets = rec_get_offsets(prev_version, index, *offsets, ULINT_UNDEFINED, offset_heap); -#if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG +#ifdef UNIV_BLOB_NULL_DEBUG ut_a(!rec_offs_any_null_extern(prev_version, *offsets)); -#endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */ +#endif /* UNIV_BLOB_NULL_DEBUG */ trx_id = row_get_rec_trx_id(prev_version, index, *offsets); @@ -691,9 +691,9 @@ row_vers_build_for_semi_consistent_read( /* We found a version that belongs to a committed transaction: return it. 
*/ -#if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG +#ifdef UNIV_BLOB_NULL_DEBUG ut_a(!rec_offs_any_null_extern(version, *offsets)); -#endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */ +#endif /* UNIV_BLOB_NULL_DEBUG */ if (rec == version) { *old_vers = rec; @@ -752,9 +752,9 @@ row_vers_build_for_semi_consistent_read( version = prev_version; *offsets = rec_get_offsets(version, index, *offsets, ULINT_UNDEFINED, offset_heap); -#if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG +#ifdef UNIV_BLOB_NULL_DEBUG ut_a(!rec_offs_any_null_extern(version, *offsets)); -#endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */ +#endif /* UNIV_BLOB_NULL_DEBUG */ }/* for (;;) */ if (heap) { diff --git a/storage/innobase/sync/sync0rw.c b/storage/innobase/sync/sync0rw.c index 5b72e0afdf4..397d505df50 100644 --- a/storage/innobase/sync/sync0rw.c +++ b/storage/innobase/sync/sync0rw.c @@ -782,9 +782,7 @@ rw_lock_add_debug_info( rw_lock_debug_mutex_exit(); if ((pass == 0) && (lock_type != RW_LOCK_WAIT_EX)) { - sync_thread_add_level(lock, lock->level, - lock_type == RW_LOCK_EX - && lock->lock_word < 0); + sync_thread_add_level(lock, lock->level); } } diff --git a/storage/innobase/sync/sync0sync.c b/storage/innobase/sync/sync0sync.c index af6e3f0e275..8ea57b8655c 100644 --- a/storage/innobase/sync/sync0sync.c +++ b/storage/innobase/sync/sync0sync.c @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1995, 2011, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1995, 2011, Innobase Oy. All Rights Reserved. Copyright (c) 2008, Google Inc. Portions of this file contain modifications contributed and copyrighted by @@ -690,7 +690,7 @@ mutex_set_debug_info( ut_ad(mutex); ut_ad(file_name); - sync_thread_add_level(mutex, mutex->level, FALSE); + sync_thread_add_level(mutex, mutex->level); mutex->file_name = file_name; mutex->line = line; @@ -1133,9 +1133,8 @@ void sync_thread_add_level( /*==================*/ void* latch, /*!< in: pointer to a mutex or an rw-lock */ - ulint level, /*!< in: level in the latching order; if + ulint level) /*!< in: level in the latching order; if SYNC_LEVEL_VARYING, nothing is done */ - ibool relock) /*!< in: TRUE if re-entering an x-lock */ { ulint i; sync_level_t* slot; @@ -1186,10 +1185,6 @@ sync_thread_add_level( array = thread_slot->levels; - if (relock) { - goto levels_ok; - } - /* NOTE that there is a problem with _NODE and _LEAF levels: if the B-tree height changes, then a leaf can change to an internal node or the other way around. 
We do not know at present if this can cause @@ -1366,7 +1361,6 @@ sync_thread_add_level( ut_error; } -levels_ok: if (array->next_free == ULINT_UNDEFINED) { ut_a(array->n_elems < array->max_elems); diff --git a/storage/innobase/trx/trx0rec.c b/storage/innobase/trx/trx0rec.c index ad209110e60..0bf41780fcc 100644 --- a/storage/innobase/trx/trx0rec.c +++ b/storage/innobase/trx/trx0rec.c @@ -1621,9 +1621,9 @@ trx_undo_prev_version_build( return(DB_ERROR); } -# if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG +# ifdef UNIV_BLOB_NULL_DEBUG ut_a(!rec_offs_any_null_extern(rec, offsets)); -# endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */ +# endif /* UNIV_BLOB_NULL_DEBUG */ if (row_upd_changes_field_size_or_external(index, offsets, update)) { ulint n_ext; diff --git a/storage/innobase/trx/trx0undo.c b/storage/innobase/trx/trx0undo.c index 805fdcee242..dae0637f72c 100644 --- a/storage/innobase/trx/trx0undo.c +++ b/storage/innobase/trx/trx0undo.c @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1996, 2011, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1996, 2009, Innobase Oy. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -918,7 +918,7 @@ trx_undo_add_page( page_no = fseg_alloc_free_page_general(header_page + TRX_UNDO_SEG_HDR + TRX_UNDO_FSEG_HEADER, undo->top_page_no + 1, FSP_UP, - TRUE, mtr, mtr); + TRUE, mtr); fil_space_release_free_extents(undo->space, n_reserved); From 4e26afa01b3c711772e4bd33ae75ac13d6c0ae20 Mon Sep 17 00:00:00 2001 From: Joerg Bruehe Date: Tue, 4 Oct 2011 12:28:30 +0200 Subject: [PATCH 4/4] Exclude NDB man pages from a source tarball, these sources don't have any current NDB. man/CMakeLists.txt: This will need to be modified as soon as NDB is added to the 5.5 sources, then the man page exclusion should be controlled by the build option also governing NDB use. --- man/CMakeLists.txt | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/man/CMakeLists.txt b/man/CMakeLists.txt index 135d66634e4..4987a5bee61 100644 --- a/man/CMakeLists.txt +++ b/man/CMakeLists.txt @@ -1,4 +1,4 @@ -# Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2006, 2011, Oracle and/or its affiliates. All rights reserved. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -16,15 +16,23 @@ # Copy man pages FILE(GLOB MAN1_FILES *.1) FILE(GLOB MAN1_EXCLUDE make_win_bin_dist.1) +FILE(GLOB MAN1_NDB ndb*.1) FILE(GLOB MAN8_FILES *.8) +FILE(GLOB MAN8_NDB ndb*.8) IF(MAN1_FILES) IF(MAN1_EXCLUDE) LIST(REMOVE_ITEM MAN1_FILES ${MAN1_EXCLUDE}) ENDIF() + IF(MAN1_NDB) + LIST(REMOVE_ITEM MAN1_FILES ${MAN1_NDB}) + ENDIF() INSTALL(FILES ${MAN1_FILES} DESTINATION ${INSTALL_MANDIR}/man1 COMPONENT ManPages) ENDIF() IF(MAN8_FILES) + IF(MAN8_NDB) + LIST(REMOVE_ITEM MAN8_FILES ${MAN8_NDB}) + ENDIF() INSTALL(FILES ${MAN8_FILES} DESTINATION ${INSTALL_MANDIR}/man8 COMPONENT ManPages) ENDIF()
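
Illustration only, not part of the patch above: the note in PATCH 4/4 says that once NDB is added to the 5.5 sources, the man page exclusion should be governed by the same build option that enables NDB. A minimal sketch of how man/CMakeLists.txt might then look follows, assuming that option is named WITH_NDBCLUSTER (an assumption; the actual option name would be whatever governs NDB in that tree):

    # Sketch only -- assumes a WITH_NDBCLUSTER build option; use whatever
    # option actually governs NDB once it is added to these sources.
    FILE(GLOB MAN1_FILES *.1)
    FILE(GLOB MAN1_EXCLUDE make_win_bin_dist.1)
    FILE(GLOB MAN1_NDB ndb*.1)

    IF(MAN1_FILES)
      IF(MAN1_EXCLUDE)
        LIST(REMOVE_ITEM MAN1_FILES ${MAN1_EXCLUDE})
      ENDIF()
      # Drop the NDB man pages only when NDB itself is not being built.
      IF(MAN1_NDB AND NOT WITH_NDBCLUSTER)
        LIST(REMOVE_ITEM MAN1_FILES ${MAN1_NDB})
      ENDIF()
      INSTALL(FILES ${MAN1_FILES} DESTINATION ${INSTALL_MANDIR}/man1
        COMPONENT ManPages)
    ENDIF()

The man8 handling would follow the same pattern. Guarding only the LIST(REMOVE_ITEM ...) call keeps the globbing unconditional, so the file lists stay correct whether or not the NDB man pages are present in the source tree.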