mirror of
https://github.com/MariaDB/server.git
synced 2026-01-06 05:22:24 +03:00
Merge 11.4 into 11.8
This commit is contained in:
@@ -664,11 +664,12 @@ void CorruptedPages::zero_out_free_pages()
|
||||
if (!space)
|
||||
die("Can't find space object for space name %s to check corrupted page",
|
||||
space_name.c_str());
|
||||
mtr_t mtr{nullptr};
|
||||
for (std::set<unsigned>::const_iterator page_it=
|
||||
space_it->second.pages.begin();
|
||||
page_it != space_it->second.pages.end(); ++page_it)
|
||||
{
|
||||
if (fseg_page_is_allocated(space, *page_it))
|
||||
if (fseg_page_is_allocated(&mtr, space, *page_it))
|
||||
{
|
||||
space_info_t &space_info = non_free_pages[space_id];
|
||||
space_info.pages.insert(*page_it);
|
||||
|
||||
@@ -13,8 +13,8 @@
|
||||
"r_table_time_ms": "REPLACED",
|
||||
"r_other_time_ms": "REPLACED",
|
||||
"r_engine_stats": {
|
||||
- "pages_accessed": 84
|
||||
+ "pages_accessed": 6
|
||||
- "pages_accessed": 90
|
||||
+ "pages_accessed": 51
|
||||
},
|
||||
"filtered": "REPLACED",
|
||||
"r_total_filtered": 2.43902439,
|
||||
@@ -22,8 +22,8 @@
|
||||
"r_table_time_ms": "REPLACED",
|
||||
"r_other_time_ms": "REPLACED",
|
||||
"r_engine_stats": {
|
||||
- "pages_accessed": 84
|
||||
+ "pages_accessed": 2
|
||||
- "pages_accessed": 90
|
||||
+ "pages_accessed": 49
|
||||
},
|
||||
"filtered": "REPLACED",
|
||||
"r_total_filtered": 2.43902439,
|
||||
|
||||
@@ -2061,7 +2061,7 @@ ANALYZE
|
||||
"r_table_time_ms": "REPLACED",
|
||||
"r_other_time_ms": "REPLACED",
|
||||
"r_engine_stats": {
|
||||
"pages_accessed": 84
|
||||
"pages_accessed": 90
|
||||
},
|
||||
"filtered": "REPLACED",
|
||||
"r_total_filtered": 2.43902439,
|
||||
@@ -2093,7 +2093,7 @@ ANALYZE
|
||||
"r_table_time_ms": "REPLACED",
|
||||
"r_other_time_ms": "REPLACED",
|
||||
"r_engine_stats": {
|
||||
"pages_accessed": 2
|
||||
"pages_accessed": 4
|
||||
},
|
||||
"filtered": "REPLACED",
|
||||
"r_total_filtered": 66.66666667,
|
||||
@@ -2229,7 +2229,7 @@ ANALYZE
|
||||
"r_table_time_ms": "REPLACED",
|
||||
"r_other_time_ms": "REPLACED",
|
||||
"r_engine_stats": {
|
||||
"pages_accessed": 84
|
||||
"pages_accessed": 90
|
||||
},
|
||||
"filtered": "REPLACED",
|
||||
"r_total_filtered": 2.43902439,
|
||||
@@ -2261,7 +2261,7 @@ ANALYZE
|
||||
"r_table_time_ms": "REPLACED",
|
||||
"r_other_time_ms": "REPLACED",
|
||||
"r_engine_stats": {
|
||||
"pages_accessed": 2
|
||||
"pages_accessed": 4
|
||||
},
|
||||
"filtered": "REPLACED",
|
||||
"r_total_filtered": 66.66666667,
|
||||
|
||||
@@ -4,21 +4,26 @@ insert into t1 values(1, 2);
|
||||
insert into t2 values(1, 2);
|
||||
SET GLOBAL innodb_fast_shutdown = 0;
|
||||
# restart: --innodb-force-recovery=4
|
||||
SELECT CAST(variable_value AS INTEGER) INTO @read1
|
||||
FROM INFORMATION_SCHEMA.GLOBAL_STATUS
|
||||
WHERE VARIABLE_NAME='innodb_buffer_pool_read_requests';
|
||||
select variable_name,variable_value from information_schema.global_status
|
||||
WHERE variable_name LIKE 'innodb_buffer_pool_%_requests';
|
||||
variable_name variable_value
|
||||
INNODB_BUFFER_POOL_READ_REQUESTS 0
|
||||
INNODB_BUFFER_POOL_WRITE_REQUESTS 0
|
||||
select * from t1;
|
||||
f1 f2
|
||||
1 2
|
||||
SELECT CAST(variable_value AS INTEGER) INTO @read2
|
||||
FROM INFORMATION_SCHEMA.GLOBAL_STATUS
|
||||
WHERE VARIABLE_NAME='innodb_buffer_pool_read_requests';
|
||||
SELECT @read1>0, @read2>@read1;
|
||||
@read1>0 @read2>@read1
|
||||
1 1
|
||||
select variable_name from information_schema.global_status
|
||||
WHERE variable_name LIKE 'innodb_buffer_pool_%_requests' and variable_value>0;
|
||||
variable_name
|
||||
INNODB_BUFFER_POOL_READ_REQUESTS
|
||||
begin;
|
||||
insert into t1 values(2, 3);
|
||||
rollback;
|
||||
select variable_name from information_schema.global_status
|
||||
WHERE variable_name LIKE 'innodb_buffer_pool_%_requests' and variable_value>0;
|
||||
variable_name
|
||||
INNODB_BUFFER_POOL_READ_REQUESTS
|
||||
INNODB_BUFFER_POOL_WRITE_REQUESTS
|
||||
alter table t1 add f3 int not null, algorithm=copy;
|
||||
alter table t1 add f4 int not null, algorithm=inplace;
|
||||
drop index idx on t1;
|
||||
|
||||
@@ -21,25 +21,17 @@ SET GLOBAL innodb_fast_shutdown = 0;
|
||||
--source include/restart_mysqld.inc
|
||||
let $status=`SHOW ENGINE INNODB STATUS`;
|
||||
|
||||
--disable_cursor_protocol
|
||||
SELECT CAST(variable_value AS INTEGER) INTO @read1
|
||||
FROM INFORMATION_SCHEMA.GLOBAL_STATUS
|
||||
WHERE VARIABLE_NAME='innodb_buffer_pool_read_requests';
|
||||
--enable_cursor_protocol
|
||||
|
||||
select variable_name,variable_value from information_schema.global_status
|
||||
WHERE variable_name LIKE 'innodb_buffer_pool_%_requests';
|
||||
select * from t1;
|
||||
|
||||
--disable_cursor_protocol
|
||||
SELECT CAST(variable_value AS INTEGER) INTO @read2
|
||||
FROM INFORMATION_SCHEMA.GLOBAL_STATUS
|
||||
WHERE VARIABLE_NAME='innodb_buffer_pool_read_requests';
|
||||
--enable_cursor_protocol
|
||||
|
||||
SELECT @read1>0, @read2>@read1;
|
||||
select variable_name from information_schema.global_status
|
||||
WHERE variable_name LIKE 'innodb_buffer_pool_%_requests' and variable_value>0;
|
||||
|
||||
begin;
|
||||
insert into t1 values(2, 3);
|
||||
rollback;
|
||||
select variable_name from information_schema.global_status
|
||||
WHERE variable_name LIKE 'innodb_buffer_pool_%_requests' and variable_value>0;
|
||||
|
||||
alter table t1 add f3 int not null, algorithm=copy;
|
||||
|
||||
|
||||
@@ -38,6 +38,7 @@ Created 6/2/1994 Heikki Tuuri
|
||||
#include "lock0lock.h"
|
||||
#include "trx0trx.h"
|
||||
#include "srv0mon.h"
|
||||
#include "que0que.h"
|
||||
#include "gis0geo.h"
|
||||
#include "dict0boot.h"
|
||||
#include "row0sel.h" /* row_search_max_autoinc() */
|
||||
@@ -371,14 +372,13 @@ btr_root_fseg_adjust_on_import(
|
||||
|
||||
/**************************************************************//**
|
||||
Checks and adjusts the root node of a tree during IMPORT TABLESPACE.
|
||||
@return error code, or DB_SUCCESS */
|
||||
dberr_t
|
||||
btr_root_adjust_on_import(
|
||||
/*======================*/
|
||||
const dict_index_t* index) /*!< in: index tree */
|
||||
@param trx transaction
|
||||
@param index index tree
|
||||
@return error code */
|
||||
dberr_t btr_root_adjust_on_import(trx_t *trx, const dict_index_t *index)
|
||||
{
|
||||
dberr_t err;
|
||||
mtr_t mtr;
|
||||
mtr_t mtr{trx};
|
||||
page_t* page;
|
||||
page_zip_des_t* page_zip;
|
||||
dict_table_t* table = index->table;
|
||||
@@ -1006,25 +1006,25 @@ btr_create(
|
||||
/** Free a B-tree except the root page. The root page MUST be freed after
|
||||
this by calling btr_free_root.
|
||||
@param[in,out] block root page
|
||||
@param[in] log_mode mtr logging mode */
|
||||
@param[in] outer_mtr surrounding mini-transaction */
|
||||
static
|
||||
void
|
||||
btr_free_but_not_root(
|
||||
buf_block_t* block,
|
||||
mtr_log_t log_mode
|
||||
const mtr_t& outer_mtr
|
||||
#ifdef BTR_CUR_HASH_ADAPT
|
||||
,bool ahi=false
|
||||
#endif
|
||||
)
|
||||
{
|
||||
mtr_t mtr;
|
||||
mtr_t mtr{outer_mtr.trx};
|
||||
|
||||
ut_ad(fil_page_index_page_check(block->page.frame));
|
||||
ut_ad(!page_has_siblings(block->page.frame));
|
||||
leaf_loop:
|
||||
mtr_start(&mtr);
|
||||
ut_d(mtr.freeing_tree());
|
||||
mtr_set_log_mode(&mtr, log_mode);
|
||||
mtr_set_log_mode(&mtr, outer_mtr.get_log_mode());
|
||||
fil_space_t *space = mtr.set_named_space_id(block->page.id().space());
|
||||
|
||||
if (!btr_root_fseg_validate(FIL_PAGE_DATA + PAGE_BTR_SEG_LEAF,
|
||||
@@ -1052,7 +1052,7 @@ leaf_loop:
|
||||
}
|
||||
top_loop:
|
||||
mtr_start(&mtr);
|
||||
mtr_set_log_mode(&mtr, log_mode);
|
||||
mtr_set_log_mode(&mtr, outer_mtr.get_log_mode());
|
||||
space = mtr.set_named_space_id(block->page.id().space());
|
||||
|
||||
finished = !btr_root_fseg_validate(FIL_PAGE_DATA + PAGE_BTR_SEG_TOP,
|
||||
@@ -1071,16 +1071,13 @@ top_loop:
|
||||
}
|
||||
}
|
||||
|
||||
#ifdef BTR_CUR_HASH_ADAPT
|
||||
TRANSACTIONAL_TARGET
|
||||
#endif
|
||||
/** Clear the index tree and reinitialize the root page, in the
|
||||
rollback of TRX_UNDO_EMPTY. The BTR_SEG_LEAF is freed and reinitialized.
|
||||
@param thr query thread
|
||||
@return error code */
|
||||
dberr_t dict_index_t::clear(que_thr_t *thr)
|
||||
{
|
||||
mtr_t mtr;
|
||||
mtr_t mtr{thr_get_trx(thr)};
|
||||
mtr.start();
|
||||
if (table->is_temporary())
|
||||
mtr.set_log_mode(MTR_LOG_NO_REDO);
|
||||
@@ -1101,7 +1098,7 @@ dberr_t dict_index_t::clear(que_thr_t *thr)
|
||||
RW_X_LATCH, guess, BUF_GET, &mtr, &err);
|
||||
if (root_block)
|
||||
{
|
||||
btr_free_but_not_root(root_block, mtr.get_log_mode()
|
||||
btr_free_but_not_root(root_block, mtr
|
||||
#ifdef BTR_CUR_HASH_ADAPT
|
||||
,any_ahi_pages()
|
||||
#endif
|
||||
@@ -1133,19 +1130,20 @@ void btr_free_if_exists(fil_space_t *space, uint32_t page,
|
||||
space->zip_size(),
|
||||
index_id, mtr))
|
||||
{
|
||||
btr_free_but_not_root(root, mtr->get_log_mode());
|
||||
btr_free_but_not_root(root, *mtr);
|
||||
mtr->set_named_space(space);
|
||||
btr_free_root(root, *space, mtr);
|
||||
}
|
||||
}
|
||||
|
||||
/** Drop a temporary table
|
||||
@param trx transaction
|
||||
@param table temporary table */
|
||||
void btr_drop_temporary_table(const dict_table_t &table)
|
||||
void btr_drop_temporary_table(trx_t *trx, const dict_table_t &table)
|
||||
{
|
||||
ut_ad(table.is_temporary());
|
||||
ut_ad(table.space == fil_system.temp_space);
|
||||
mtr_t mtr;
|
||||
mtr_t mtr{trx};
|
||||
mtr.start();
|
||||
for (const dict_index_t *index= table.indexes.start; index;
|
||||
index= dict_table_get_next_index(index))
|
||||
@@ -1159,8 +1157,8 @@ void btr_drop_temporary_table(const dict_table_t &table)
|
||||
0, RW_X_LATCH, guess, BUF_GET,
|
||||
&mtr, nullptr))
|
||||
{
|
||||
btr_free_but_not_root(block, MTR_LOG_NO_REDO);
|
||||
mtr.set_log_mode(MTR_LOG_NO_REDO);
|
||||
btr_free_but_not_root(block, mtr);
|
||||
btr_free_root(block, *fil_system.temp_space, &mtr);
|
||||
mtr.commit();
|
||||
mtr.start();
|
||||
@@ -1179,7 +1177,7 @@ btr_read_autoinc(dict_index_t* index)
|
||||
ut_ad(index->is_primary());
|
||||
ut_ad(index->table->persistent_autoinc);
|
||||
ut_ad(!index->table->is_temporary());
|
||||
mtr_t mtr;
|
||||
mtr_t mtr{nullptr};
|
||||
mtr.start();
|
||||
dberr_t err;
|
||||
uint64_t autoinc;
|
||||
@@ -1217,7 +1215,7 @@ uint64_t btr_read_autoinc_with_fallback(const dict_table_t *table,
|
||||
ut_ad(!table->is_temporary());
|
||||
|
||||
uint64_t autoinc= 0;
|
||||
mtr_t mtr;
|
||||
mtr_t mtr{nullptr};
|
||||
mtr.start();
|
||||
const dict_index_t *const first_index= dict_table_get_first_index(table);
|
||||
|
||||
@@ -1260,19 +1258,20 @@ uint64_t btr_read_autoinc_with_fallback(const dict_table_t *table,
|
||||
}
|
||||
|
||||
/** Write the next available AUTO_INCREMENT value to PAGE_ROOT_AUTO_INC.
|
||||
@param[in,out] trx transaction
|
||||
@param[in,out] index clustered index
|
||||
@param[in] autoinc the AUTO_INCREMENT value
|
||||
@param[in] reset whether to reset the AUTO_INCREMENT
|
||||
to a possibly smaller value than currently
|
||||
exists in the page */
|
||||
void
|
||||
btr_write_autoinc(dict_index_t* index, ib_uint64_t autoinc, bool reset)
|
||||
void btr_write_autoinc(trx_t *trx, dict_index_t *index, uint64_t autoinc,
|
||||
bool reset)
|
||||
{
|
||||
ut_ad(index->is_primary());
|
||||
ut_ad(index->table->persistent_autoinc);
|
||||
ut_ad(!index->table->is_temporary());
|
||||
|
||||
mtr_t mtr;
|
||||
mtr_t mtr{trx};
|
||||
mtr.start();
|
||||
fil_space_t *space= index->table->space;
|
||||
if (buf_block_t *root= buf_page_get(page_id_t(space->id, index->page),
|
||||
@@ -4360,8 +4359,6 @@ btr_print_index(
|
||||
}
|
||||
|
||||
mtr_commit(&mtr);
|
||||
|
||||
ut_ad(btr_validate_index(index, 0));
|
||||
}
|
||||
#endif /* UNIV_BTR_PRINT */
|
||||
|
||||
@@ -4706,7 +4703,7 @@ dberr_t
|
||||
btr_validate_level(
|
||||
/*===============*/
|
||||
dict_index_t* index, /*!< in: index tree */
|
||||
const trx_t* trx, /*!< in: transaction or NULL */
|
||||
trx_t* trx, /*!< in: transaction */
|
||||
ulint level) /*!< in: level number */
|
||||
{
|
||||
buf_block_t* block;
|
||||
@@ -4719,7 +4716,7 @@ btr_validate_level(
|
||||
rec_t* rec;
|
||||
page_cur_t cursor;
|
||||
dtuple_t* node_ptr_tuple;
|
||||
mtr_t mtr;
|
||||
mtr_t mtr{trx};
|
||||
mem_heap_t* heap = mem_heap_create(256);
|
||||
rec_offs* offsets = NULL;
|
||||
rec_offs* offsets2= NULL;
|
||||
@@ -4744,7 +4741,7 @@ btr_validate_level(
|
||||
while (level != btr_page_get_level(page)) {
|
||||
const rec_t* node_ptr;
|
||||
switch (dberr_t e =
|
||||
fseg_page_is_allocated(space,
|
||||
fseg_page_is_allocated(&mtr, space,
|
||||
block->page.id().page_no())) {
|
||||
case DB_SUCCESS_LOCKED_REC:
|
||||
break;
|
||||
@@ -4834,7 +4831,8 @@ func_exit:
|
||||
#endif /* UNIV_ZIP_DEBUG */
|
||||
|
||||
if (DB_SUCCESS_LOCKED_REC
|
||||
!= fseg_page_is_allocated(space, block->page.id().page_no())) {
|
||||
!= fseg_page_is_allocated(&mtr, space,
|
||||
block->page.id().page_no())) {
|
||||
btr_validate_report1(index, level, block);
|
||||
|
||||
ib::warn() << "Page is marked as free";
|
||||
@@ -5137,9 +5135,9 @@ dberr_t
|
||||
btr_validate_index(
|
||||
/*===============*/
|
||||
dict_index_t* index, /*!< in: index */
|
||||
const trx_t* trx) /*!< in: transaction or NULL */
|
||||
trx_t* trx) /*!< in: transaction */
|
||||
{
|
||||
mtr_t mtr;
|
||||
mtr_t mtr{trx};
|
||||
mtr.start();
|
||||
|
||||
mtr_x_lock_index(index, &mtr);
|
||||
|
||||
@@ -50,7 +50,7 @@ PageBulk::init()
|
||||
m_index->set_modified(m_mtr);
|
||||
|
||||
if (m_page_no == FIL_NULL) {
|
||||
mtr_t alloc_mtr;
|
||||
mtr_t alloc_mtr{m_mtr.trx};
|
||||
dberr_t err= DB_SUCCESS;
|
||||
|
||||
/* We commit redo log for allocation by a separate mtr,
|
||||
@@ -110,7 +110,7 @@ PageBulk::init()
|
||||
m_page_zip = buf_block_get_page_zip(new_block);
|
||||
|
||||
if (!m_level && !m_index->is_primary()) {
|
||||
page_update_max_trx_id(new_block, m_page_zip, m_trx_id,
|
||||
page_update_max_trx_id(new_block, m_page_zip, m_mtr.trx->id,
|
||||
&m_mtr);
|
||||
}
|
||||
|
||||
@@ -872,7 +872,7 @@ BtrBulk::pageSplit(
|
||||
}
|
||||
|
||||
/* Initialize a new page */
|
||||
PageBulk new_page_bulk(m_index, m_trx->id, FIL_NULL,
|
||||
PageBulk new_page_bulk(m_index, m_trx, FIL_NULL,
|
||||
page_bulk->getLevel());
|
||||
dberr_t err = new_page_bulk.init();
|
||||
if (err != DB_SUCCESS) {
|
||||
@@ -1004,7 +1004,7 @@ BtrBulk::insert(
|
||||
/* Check if we need to create a PageBulk for the level. */
|
||||
if (level + 1 > m_page_bulks.size()) {
|
||||
PageBulk* new_page_bulk
|
||||
= UT_NEW_NOKEY(PageBulk(m_index, m_trx->id, FIL_NULL,
|
||||
= UT_NEW_NOKEY(PageBulk(m_index, m_trx, FIL_NULL,
|
||||
level));
|
||||
err = new_page_bulk->init();
|
||||
if (err != DB_SUCCESS) {
|
||||
@@ -1058,7 +1058,7 @@ BtrBulk::insert(
|
||||
if (!page_bulk->isSpaceAvailable(rec_size)) {
|
||||
/* Create a sibling page_bulk. */
|
||||
PageBulk* sibling_page_bulk;
|
||||
sibling_page_bulk = UT_NEW_NOKEY(PageBulk(m_index, m_trx->id,
|
||||
sibling_page_bulk = UT_NEW_NOKEY(PageBulk(m_index, m_trx,
|
||||
FIL_NULL, level));
|
||||
err = sibling_page_bulk->init();
|
||||
if (err != DB_SUCCESS) {
|
||||
@@ -1169,9 +1169,9 @@ BtrBulk::finish(dberr_t err)
|
||||
|
||||
if (err == DB_SUCCESS) {
|
||||
rec_t* first_rec;
|
||||
mtr_t mtr;
|
||||
mtr_t mtr{m_trx};
|
||||
buf_block_t* last_block;
|
||||
PageBulk root_page_bulk(m_index, m_trx->id,
|
||||
PageBulk root_page_bulk(m_index, m_trx,
|
||||
m_index->page, m_root_level);
|
||||
|
||||
mtr.start();
|
||||
@@ -1214,6 +1214,6 @@ err_exit:
|
||||
}
|
||||
|
||||
ut_ad(err != DB_SUCCESS
|
||||
|| btr_validate_index(m_index, NULL) == DB_SUCCESS);
|
||||
|| btr_validate_index(m_index, m_trx) == DB_SUCCESS);
|
||||
return(err);
|
||||
}
|
||||
|
||||
@@ -454,28 +454,28 @@ inconsistent:
|
||||
|
||||
/** Load the instant ALTER TABLE metadata from the clustered index
|
||||
when loading a table definition.
|
||||
@param[in,out] mtr mini-transaction
|
||||
@param[in,out] table table definition from the data dictionary
|
||||
@return error code
|
||||
@retval DB_SUCCESS if no error occurred */
|
||||
dberr_t btr_cur_instant_init(dict_table_t *table)
|
||||
dberr_t btr_cur_instant_init(mtr_t *mtr, dict_table_t *table)
|
||||
{
|
||||
mtr_t mtr;
|
||||
dict_index_t *index= dict_table_get_first_index(table);
|
||||
mtr.start();
|
||||
dberr_t err = index ? btr_cur_instant_init_low(index, &mtr) : DB_CORRUPTION;
|
||||
mtr.commit();
|
||||
mtr->start();
|
||||
dberr_t err= index ? btr_cur_instant_init_low(index, mtr) : DB_CORRUPTION;
|
||||
mtr->commit();
|
||||
if (err == DB_SUCCESS && index->is_gen_clust())
|
||||
{
|
||||
btr_cur_t cur;
|
||||
mtr.start();
|
||||
err= cur.open_leaf(false, index, BTR_SEARCH_LEAF, &mtr);
|
||||
mtr->start();
|
||||
err= cur.open_leaf(false, index, BTR_SEARCH_LEAF, mtr);
|
||||
if (err != DB_SUCCESS);
|
||||
else if (const rec_t *rec= page_rec_get_prev(btr_cur_get_rec(&cur)))
|
||||
if (page_rec_is_user_rec(rec))
|
||||
table->row_id= mach_read_from_6(rec);
|
||||
mtr.commit();
|
||||
mtr->commit();
|
||||
}
|
||||
return(err);
|
||||
return err;
|
||||
}
|
||||
|
||||
/** Initialize the n_core_null_bytes on first access to a clustered
|
||||
@@ -966,7 +966,7 @@ MY_ATTRIBUTE((nonnull,warn_unused_result))
|
||||
@retval 1 if the page could be latched in the wrong order
|
||||
@retval -1 if the latch on block was temporarily released */
|
||||
static int btr_latch_prev(rw_lock_type_t rw_latch,
|
||||
page_id_t page_id, dberr_t *err, mtr_t *mtr)
|
||||
page_id_t page_id, dberr_t *err, mtr_t *mtr) noexcept
|
||||
{
|
||||
ut_ad(rw_latch == RW_S_LATCH || rw_latch == RW_X_LATCH);
|
||||
|
||||
@@ -989,7 +989,8 @@ static int btr_latch_prev(rw_lock_type_t rw_latch,
|
||||
|
||||
retry:
|
||||
int ret= 1;
|
||||
buf_block_t *prev= buf_pool.page_fix(page_id, err, buf_pool_t::FIX_NOWAIT);
|
||||
buf_block_t *prev=
|
||||
buf_pool.page_fix(page_id, err, mtr->trx, buf_pool_t::FIX_NOWAIT);
|
||||
if (UNIV_UNLIKELY(!prev))
|
||||
return 0;
|
||||
if (prev == reinterpret_cast<buf_block_t*>(-1))
|
||||
@@ -1006,7 +1007,7 @@ static int btr_latch_prev(rw_lock_type_t rw_latch,
|
||||
else
|
||||
block->page.lock.x_unlock();
|
||||
|
||||
prev= buf_pool.page_fix(page_id, err, buf_pool_t::FIX_WAIT_READ);
|
||||
prev= buf_pool.page_fix(page_id, err, mtr->trx, buf_pool_t::FIX_WAIT_READ);
|
||||
|
||||
if (!prev)
|
||||
{
|
||||
@@ -3294,11 +3295,12 @@ btr_cur_update_in_place(
|
||||
/** Trim a metadata record during the rollback of instant ALTER TABLE.
|
||||
@param[in] entry metadata tuple
|
||||
@param[in] index primary key
|
||||
@param[in] update update vector for the rollback */
|
||||
@param[in] update update vector for the rollback
|
||||
@param[in,out] trx transaction */
|
||||
ATTRIBUTE_COLD
|
||||
static void btr_cur_trim_alter_metadata(dtuple_t* entry,
|
||||
const dict_index_t* index,
|
||||
const upd_t* update)
|
||||
const upd_t* update, trx_t *trx)
|
||||
{
|
||||
ut_ad(index->is_instant());
|
||||
ut_ad(update->is_alter_metadata());
|
||||
@@ -3328,7 +3330,7 @@ static void btr_cur_trim_alter_metadata(dtuple_t* entry,
|
||||
|
||||
/* This is based on dict_table_t::deserialise_columns()
|
||||
and btr_cur_instant_init_low(). */
|
||||
mtr_t mtr;
|
||||
mtr_t mtr{trx};
|
||||
mtr.start();
|
||||
buf_block_t* block = buf_page_get(
|
||||
page_id_t(index->table->space->id,
|
||||
@@ -3391,8 +3393,9 @@ btr_cur_trim(
|
||||
already executed) or rolling back such an operation. */
|
||||
ut_ad(!upd_get_nth_field(update, 0)->orig_len);
|
||||
ut_ad(entry->is_metadata());
|
||||
trx_t* const trx{thr->graph->trx};
|
||||
|
||||
if (thr->graph->trx->in_rollback) {
|
||||
if (trx->in_rollback) {
|
||||
/* This rollback can occur either as part of
|
||||
ha_innobase::commit_inplace_alter_table() rolling
|
||||
back after a failed innobase_add_instant_try(),
|
||||
@@ -3409,7 +3412,7 @@ btr_cur_trim(
|
||||
ut_ad(update->n_fields > 2);
|
||||
if (update->is_alter_metadata()) {
|
||||
btr_cur_trim_alter_metadata(
|
||||
entry, index, update);
|
||||
entry, index, update, trx);
|
||||
return;
|
||||
}
|
||||
ut_ad(!entry->is_alter_metadata());
|
||||
@@ -5158,20 +5161,19 @@ inexact:
|
||||
return (n_rows);
|
||||
}
|
||||
|
||||
/** Estimates the number of rows in a given index range. Do search in the left
|
||||
page, then if there are pages between left and right ones, read a few pages to
|
||||
the right, if the right page is reached, count the exact number of rows without
|
||||
fetching the right page, the right page will be fetched in the caller of this
|
||||
function and the amount of its rows will be added. If the right page is not
|
||||
reached, count the estimated(see btr_estimate_n_rows_in_range_on_level() for
|
||||
details) rows number, and fetch the right page. If leaves are reached, unlatch
|
||||
non-leaf pages except the right leaf parent. After the right leaf page is
|
||||
fetched, commit mtr.
|
||||
@param[in] index index
|
||||
@param[in] range_start range start
|
||||
@param[in] range_end range end
|
||||
/** Estimates the number of rows in a given index range. Do search in the
|
||||
left page, then if there are pages between left and right ones, read a few
|
||||
pages to the right, if the right page is reached, fetch it and count the exact
|
||||
number of rows, otherwise count the estimated(see
|
||||
btr_estimate_n_rows_in_range_on_level() for details) number if rows, and
|
||||
fetch the right page. If leaves are reached, unlatch non-leaf pages except
|
||||
the right leaf parent. After the right leaf page is fetched, commit mtr.
|
||||
@param trx transaction
|
||||
@param index B-tree
|
||||
@param range_start first key
|
||||
@param range_end last key
|
||||
@return estimated number of rows; */
|
||||
ha_rows btr_estimate_n_rows_in_range(dict_index_t *index,
|
||||
ha_rows btr_estimate_n_rows_in_range(trx_t *trx, dict_index_t *index,
|
||||
btr_pos_t *range_start,
|
||||
btr_pos_t *range_end)
|
||||
{
|
||||
@@ -5182,9 +5184,9 @@ ha_rows btr_estimate_n_rows_in_range(dict_index_t *index,
|
||||
|
||||
ut_ad(index->is_btree());
|
||||
|
||||
mtr_t mtr{trx};
|
||||
btr_est_cur_t p1(index, *range_start->tuple, range_start->mode);
|
||||
btr_est_cur_t p2(index, *range_end->tuple, range_end->mode);
|
||||
mtr_t mtr;
|
||||
|
||||
ulint height;
|
||||
ulint root_height= 0; /* remove warning */
|
||||
@@ -5794,7 +5796,7 @@ btr_store_big_rec_extern_fields(
|
||||
ulint extern_len;
|
||||
ulint store_len;
|
||||
ulint i;
|
||||
mtr_t mtr;
|
||||
mtr_t mtr{btr_mtr->trx};
|
||||
mem_heap_t* heap = NULL;
|
||||
page_zip_des_t* page_zip;
|
||||
z_stream c_stream;
|
||||
@@ -6250,9 +6252,7 @@ btr_free_externally_stored_field(
|
||||
/* !rec holds in a call from purge when field_ref is in an undo page */
|
||||
ut_ad(rec || !block->page.zip.data);
|
||||
|
||||
for (;;) {
|
||||
mtr_t mtr;
|
||||
|
||||
for (mtr_t mtr{local_mtr->trx};;) {
|
||||
mtr.start();
|
||||
mtr.set_spaces(*local_mtr);
|
||||
mtr.set_log_mode_sub(*local_mtr);
|
||||
@@ -6456,9 +6456,9 @@ btr_copy_blob_prefix(
|
||||
uint32_t offset) /*!< in: offset on the first BLOB page */
|
||||
{
|
||||
ulint copied_len = 0;
|
||||
THD* thd{current_thd};
|
||||
|
||||
for (;;) {
|
||||
mtr_t mtr;
|
||||
for (mtr_t mtr{thd ? thd_to_trx(thd) : nullptr};;) {
|
||||
buf_block_t* block;
|
||||
const page_t* page;
|
||||
const byte* blob_header;
|
||||
|
||||
@@ -34,6 +34,7 @@ Created 2/17/1996 Heikki Tuuri
|
||||
#include "btr0pcur.h"
|
||||
#include "btr0btr.h"
|
||||
#include "srv0mon.h"
|
||||
#include "trx0trx.h"
|
||||
#include "log.h"
|
||||
|
||||
#ifdef UNIV_SEARCH_PERF_STAT
|
||||
@@ -1089,13 +1090,14 @@ btr_search_guess_on_hash(
|
||||
we validate the guessed rec. */
|
||||
part.latch.rd_unlock();
|
||||
|
||||
if (mtr->trx)
|
||||
buf_inc_get(mtr->trx);
|
||||
|
||||
block->page.fix();
|
||||
buf_page_make_young_if_needed(&block->page);
|
||||
static_assert(ulint{MTR_MEMO_PAGE_S_FIX} == ulint{BTR_SEARCH_LEAF}, "");
|
||||
static_assert(ulint{MTR_MEMO_PAGE_X_FIX} == ulint{BTR_MODIFY_LEAF}, "");
|
||||
|
||||
++buf_pool.stat.n_page_gets;
|
||||
|
||||
mtr->memo_push(block, mtr_memo_type_t(latch_mode));
|
||||
|
||||
ut_ad(page_rec_is_user_rec(rec));
|
||||
@@ -1326,16 +1328,16 @@ void btr_search_drop_page_hash_index(buf_block_t *block,
|
||||
btr_search_drop_page_hash_index(block, not_garbage, folds);
|
||||
}
|
||||
|
||||
void btr_search_drop_page_hash_when_freed(const page_id_t page_id) noexcept
|
||||
void btr_search_drop_page_hash_when_freed(mtr_t *mtr, const page_id_t page_id)
|
||||
noexcept
|
||||
{
|
||||
mtr_t mtr;
|
||||
mtr.start();
|
||||
/* If the caller has a latch on the page, then the caller must be an
|
||||
x-latch page and it must have already dropped the hash index for the
|
||||
page. Because of the x-latch that we are possibly holding, we must
|
||||
(recursively) x-latch it, even though we are only reading. */
|
||||
auto sp= mtr->get_savepoint();
|
||||
if (buf_block_t *block= buf_page_get_gen(page_id, 0, RW_X_LATCH, nullptr,
|
||||
BUF_PEEK_IF_IN_POOL, &mtr))
|
||||
BUF_PEEK_IF_IN_POOL, mtr))
|
||||
{
|
||||
/* In all our callers, the table handle should be open, or we
|
||||
should be in the process of dropping the table (preventing eviction). */
|
||||
@@ -1344,7 +1346,7 @@ void btr_search_drop_page_hash_when_freed(const page_id_t page_id) noexcept
|
||||
btr_search_drop_page_hash_index(block, nullptr);
|
||||
}
|
||||
|
||||
mtr.commit();
|
||||
mtr->rollback_to_savepoint(sp);
|
||||
}
|
||||
|
||||
/** Build a hash index on a page with the given parameters. If the page already
|
||||
|
||||
@@ -28,7 +28,6 @@ Created 11/5/1995 Heikki Tuuri
|
||||
#include "mtr0types.h"
|
||||
#include "mach0data.h"
|
||||
#include "buf0checksum.h"
|
||||
#include "mariadb_stats.h"
|
||||
#include <string.h>
|
||||
|
||||
#ifdef UNIV_INNOCHECKSUM
|
||||
@@ -325,6 +324,26 @@ static constexpr size_t pages_in_extent[]=
|
||||
pages(4096), pages(8192), pages(16384), pages(32768), pages(65536)
|
||||
};
|
||||
|
||||
void buf_inc_get(trx_t *trx) noexcept
|
||||
{
|
||||
trx->pages_accessed++;
|
||||
if (ha_handler_stats *stats= trx->active_handler_stats)
|
||||
stats->pages_accessed++;
|
||||
}
|
||||
|
||||
void buf_inc_get() noexcept
|
||||
{
|
||||
if (THD *thd= current_thd)
|
||||
if (trx_t *trx= thd_to_trx(thd))
|
||||
buf_inc_get(trx);
|
||||
}
|
||||
|
||||
static ATTRIBUTE_NOINLINE void buf_inc_read(trx_t *trx) noexcept
|
||||
{
|
||||
if (ha_handler_stats *stats= trx->active_handler_stats)
|
||||
stats->pages_read_count++;
|
||||
}
|
||||
|
||||
# ifdef SUX_LOCK_GENERIC
|
||||
void page_hash_latch::read_lock_wait() noexcept
|
||||
{
|
||||
@@ -2227,7 +2246,7 @@ void buf_page_free(fil_space_t *space, uint32_t page, mtr_t *mtr)
|
||||
)
|
||||
mtr->add_freed_offset(space, page);
|
||||
|
||||
++buf_pool.stat.n_page_gets;
|
||||
buf_inc_get();
|
||||
const page_id_t page_id(space->id, page);
|
||||
buf_pool_t::hash_chain &chain= buf_pool.page_hash.cell_get(page_id.fold());
|
||||
uint32_t fix;
|
||||
@@ -2261,17 +2280,12 @@ void buf_page_free(fil_space_t *space, uint32_t page, mtr_t *mtr)
|
||||
mtr->memo_push(block, MTR_MEMO_PAGE_X_MODIFY);
|
||||
}
|
||||
|
||||
static void buf_inc_get(ha_handler_stats *stats)
|
||||
{
|
||||
mariadb_increment_pages_accessed(stats);
|
||||
++buf_pool.stat.n_page_gets;
|
||||
}
|
||||
|
||||
TRANSACTIONAL_TARGET
|
||||
buf_page_t *buf_page_get_zip(const page_id_t page_id) noexcept
|
||||
{
|
||||
ha_handler_stats *const stats= mariadb_stats;
|
||||
buf_inc_get(stats);
|
||||
THD *const thd= current_thd;
|
||||
trx_t *const trx= thd ? thd_to_trx(thd) : nullptr;
|
||||
if (trx) buf_inc_get(trx);
|
||||
|
||||
buf_pool_t::hash_chain &chain= buf_pool.page_hash.cell_get(page_id.fold());
|
||||
page_hash_latch &hash_lock= buf_pool.page_hash.lock_get(chain);
|
||||
@@ -2303,7 +2317,7 @@ buf_page_t *buf_page_get_zip(const page_id_t page_id) noexcept
|
||||
switch (dberr_t err= buf_read_page(page_id, chain, false)) {
|
||||
case DB_SUCCESS:
|
||||
case DB_SUCCESS_LOCKED_REC:
|
||||
mariadb_increment_pages_read(stats);
|
||||
if (trx) buf_inc_read(trx);
|
||||
continue;
|
||||
case DB_TABLESPACE_DELETED:
|
||||
return nullptr;
|
||||
@@ -2526,11 +2540,10 @@ buf_block_t *buf_pool_t::unzip(buf_page_t *b, buf_pool_t::hash_chain &chain)
|
||||
}
|
||||
|
||||
buf_block_t *buf_pool_t::page_fix(const page_id_t id,
|
||||
dberr_t *err,
|
||||
dberr_t *err, trx_t *trx,
|
||||
buf_pool_t::page_fix_conflicts c) noexcept
|
||||
{
|
||||
ha_handler_stats *const stats= mariadb_stats;
|
||||
buf_inc_get(stats);
|
||||
if (trx) buf_inc_get(trx);
|
||||
auto& chain= page_hash.cell_get(id.fold());
|
||||
page_hash_latch &hash_lock= page_hash.lock_get(chain);
|
||||
for (;;)
|
||||
@@ -2618,7 +2631,7 @@ buf_block_t *buf_pool_t::page_fix(const page_id_t id,
|
||||
return nullptr;
|
||||
case DB_SUCCESS:
|
||||
case DB_SUCCESS_LOCKED_REC:
|
||||
mariadb_increment_pages_read(stats);
|
||||
if (trx) buf_inc_read(trx);
|
||||
buf_read_ahead_random(id);
|
||||
}
|
||||
}
|
||||
@@ -2740,8 +2753,9 @@ buf_page_get_gen(
|
||||
}
|
||||
#endif /* UNIV_DEBUG */
|
||||
|
||||
ha_handler_stats* const stats = mariadb_stats;
|
||||
buf_inc_get(stats);
|
||||
THD *const thd = current_thd;
|
||||
trx_t *const trx= thd ? thd_to_trx(thd) : nullptr;
|
||||
if (trx) buf_inc_get(trx);
|
||||
auto& chain= buf_pool.page_hash.cell_get(page_id.fold());
|
||||
page_hash_latch& hash_lock = buf_pool.page_hash.lock_get(chain);
|
||||
loop:
|
||||
@@ -2787,7 +2801,7 @@ loop:
|
||||
switch (dberr_t local_err = buf_read_page(page_id, chain)) {
|
||||
case DB_SUCCESS:
|
||||
case DB_SUCCESS_LOCKED_REC:
|
||||
mariadb_increment_pages_read(stats);
|
||||
if (trx) buf_inc_read(trx);
|
||||
buf_read_ahead_random(page_id);
|
||||
break;
|
||||
default:
|
||||
@@ -3055,7 +3069,7 @@ buf_block_t *buf_page_try_get(const page_id_t page_id, mtr_t *mtr) noexcept
|
||||
ut_ad(block->page.buf_fix_count());
|
||||
ut_ad(block->page.id() == page_id);
|
||||
|
||||
buf_inc_get(mariadb_stats);
|
||||
buf_inc_get();
|
||||
return block;
|
||||
}
|
||||
|
||||
@@ -3982,25 +3996,24 @@ void buf_pool_t::get_info(buf_pool_info_t *pool_info) noexcept
|
||||
|
||||
double elapsed= 0.001 + difftime(time(nullptr), last_printout_time);
|
||||
|
||||
pool_info->n_pages_made_young= stat.n_pages_made_young;
|
||||
pool_info->page_made_young_rate=
|
||||
double(stat.n_pages_made_young - old_stat.n_pages_made_young) /
|
||||
elapsed;
|
||||
pool_info->n_pages_not_made_young= stat.n_pages_not_made_young;
|
||||
pool_info->page_not_made_young_rate=
|
||||
double(stat.n_pages_not_made_young - old_stat.n_pages_not_made_young) /
|
||||
elapsed;
|
||||
pool_info->n_page_gets= stat.n_page_gets;
|
||||
pool_info->n_pages_read= stat.n_pages_read;
|
||||
pool_info->n_pages_written= stat.n_pages_written;
|
||||
pool_info->n_pages_created= stat.n_pages_created;
|
||||
pool_info->n_ra_pages_read_rnd= stat.n_ra_pages_read_rnd;
|
||||
pool_info->n_ra_pages_read= stat.n_ra_pages_read;
|
||||
pool_info->n_ra_pages_evicted= stat.n_ra_pages_evicted;
|
||||
pool_info->n_pages_made_young= stat.n_pages_made_young;
|
||||
pool_info->n_pages_not_made_young= stat.n_pages_not_made_young;
|
||||
|
||||
pool_info->pages_read_rate=
|
||||
double(stat.n_pages_read - old_stat.n_pages_read) / elapsed;
|
||||
pool_info->n_pages_created= stat.n_pages_created;
|
||||
pool_info->pages_created_rate=
|
||||
double(stat.n_pages_created - old_stat.n_pages_created) / elapsed;
|
||||
pool_info->n_pages_written= stat.n_pages_written;
|
||||
pool_info->pages_written_rate=
|
||||
double(stat.n_pages_written - old_stat.n_pages_written) / elapsed;
|
||||
pool_info->n_page_gets= stat.n_page_gets;
|
||||
pool_info->n_page_get_delta= stat.n_page_gets - old_stat.n_page_gets;
|
||||
pool_info->n_page_get_delta= pool_info->n_page_gets -
|
||||
old_stat.n_page_gets_nonatomic;
|
||||
if (pool_info->n_page_get_delta)
|
||||
{
|
||||
pool_info->page_read_delta= stat.n_pages_read - old_stat.n_pages_read;
|
||||
@@ -4009,13 +4022,18 @@ void buf_pool_t::get_info(buf_pool_info_t *pool_info) noexcept
|
||||
pool_info->not_young_making_delta=
|
||||
stat.n_pages_not_made_young - old_stat.n_pages_not_made_young;
|
||||
}
|
||||
pool_info->n_ra_pages_read_rnd= stat.n_ra_pages_read_rnd;
|
||||
|
||||
pool_info->page_made_young_rate=
|
||||
double(stat.n_pages_made_young - old_stat.n_pages_made_young) /
|
||||
elapsed;
|
||||
pool_info->page_not_made_young_rate=
|
||||
double(stat.n_pages_not_made_young - old_stat.n_pages_not_made_young) /
|
||||
elapsed;
|
||||
|
||||
pool_info->pages_readahead_rnd_rate=
|
||||
double(stat.n_ra_pages_read_rnd - old_stat.n_ra_pages_read_rnd) / elapsed;
|
||||
pool_info->n_ra_pages_read= stat.n_ra_pages_read;
|
||||
pool_info->pages_readahead_rate=
|
||||
double(stat.n_ra_pages_read - old_stat.n_ra_pages_read) / elapsed;
|
||||
pool_info->n_ra_pages_evicted= stat.n_ra_pages_evicted;
|
||||
pool_info->pages_evicted_rate=
|
||||
double(stat.n_ra_pages_evicted - old_stat.n_ra_pages_evicted) / elapsed;
|
||||
pool_info->unzip_lru_len= UT_LIST_GET_LEN(unzip_LRU);
|
||||
|
||||
@@ -87,7 +87,7 @@ bool buf_dblwr_t::create() noexcept
|
||||
if (is_created())
|
||||
return true;
|
||||
|
||||
mtr_t mtr;
|
||||
mtr_t mtr{nullptr};
|
||||
const ulint size= block_size;
|
||||
|
||||
start_again:
|
||||
|
||||
@@ -1273,7 +1273,7 @@ void buf_LRU_truncate_temp(uint32_t threshold)
|
||||
0, fil_system.temp_space->free_limit);
|
||||
cur_xdes_page >= threshold;)
|
||||
{
|
||||
mtr_t mtr;
|
||||
mtr_t mtr{nullptr};
|
||||
mtr.start();
|
||||
if (buf_block_t* block= buf_page_get_gen(
|
||||
page_id_t(SRV_TMP_SPACE_ID, cur_xdes_page), 0, RW_X_LATCH,
|
||||
|
||||
@@ -42,7 +42,6 @@ Created 11/5/1995 Heikki Tuuri
|
||||
#include "srv0start.h"
|
||||
#include "srv0srv.h"
|
||||
#include "log.h"
|
||||
#include "mariadb_stats.h"
|
||||
|
||||
TRANSACTIONAL_TARGET
|
||||
bool buf_pool_t::page_hash_contains(const page_id_t page_id, hash_chain &chain)
|
||||
@@ -187,6 +186,15 @@ func_exit:
|
||||
return bpage;
|
||||
}
|
||||
|
||||
inline ulonglong mariadb_measure() noexcept
|
||||
{
|
||||
#if (MY_TIMER_ROUTINE_CYCLES)
|
||||
return my_timer_cycles();
|
||||
#else
|
||||
return my_timer_microseconds();
|
||||
#endif
|
||||
}
|
||||
|
||||
/** Low-level function which reads a page asynchronously from a file to the
|
||||
buffer buf_pool if it is not already there, in which case does nothing.
|
||||
Sets the io_fix flag and sets an exclusive lock on the buffer frame. The
|
||||
@@ -198,7 +206,8 @@ flag is cleared and the x-lock released by an i/o-handler thread.
|
||||
@param[in,out] chain buf_pool.page_hash cell for page_id
|
||||
@param[in,out] space tablespace
|
||||
@param[in,out] block preallocated buffer block
|
||||
@param[in] sync true if synchronous aio is desired
|
||||
@param[in] thd current_thd if sync
|
||||
@param[in] sync whether synchronous aio is desired
|
||||
@return error code
|
||||
@retval DB_SUCCESS if the page was read
|
||||
@retval DB_SUCCESS_LOCKED_REC if the page exists in the buffer pool already */
|
||||
@@ -210,6 +219,7 @@ buf_read_page_low(
|
||||
buf_pool_t::hash_chain& chain,
|
||||
fil_space_t* space,
|
||||
buf_block_t*& block,
|
||||
THD* thd = nullptr,
|
||||
bool sync = false) noexcept
|
||||
{
|
||||
buf_page_t* bpage;
|
||||
@@ -228,14 +238,12 @@ buf_read_page_low(
|
||||
|
||||
ut_ad(bpage->in_file());
|
||||
ulonglong mariadb_timer = 0;
|
||||
trx_t *const trx= thd ? thd_to_trx(thd) : nullptr;
|
||||
|
||||
if (sync) {
|
||||
thd_wait_begin(nullptr, THD_WAIT_DISKIO);
|
||||
if (const ha_handler_stats *stats = mariadb_stats) {
|
||||
if (stats->active) {
|
||||
mariadb_timer = mariadb_measure();
|
||||
}
|
||||
}
|
||||
thd_wait_begin(thd, THD_WAIT_DISKIO);
|
||||
|
||||
if (trx && trx->active_handler_stats) {
|
||||
mariadb_timer = mariadb_measure();
|
||||
}
|
||||
|
||||
DBUG_LOG("ib_buf",
|
||||
@@ -255,12 +263,13 @@ buf_read_page_low(
|
||||
recv_sys.free_corrupted_page(page_id, *space->chain.start);
|
||||
buf_pool.corrupted_evict(bpage, buf_page_t::READ_FIX);
|
||||
} else if (sync) {
|
||||
thd_wait_end(nullptr);
|
||||
thd_wait_end(thd);
|
||||
/* The i/o was already completed in space->io() */
|
||||
fio.err = bpage->read_complete(*fio.node);
|
||||
space->release();
|
||||
if (mariadb_timer) {
|
||||
mariadb_increment_pages_read_time(mariadb_timer);
|
||||
trx->active_handler_stats->pages_read_time
|
||||
+= mariadb_measure() - mariadb_timer;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -284,6 +293,24 @@ static void buf_read_release(buf_block_t *block)
|
||||
}
|
||||
}
|
||||
|
||||
/** Report a completed read-ahead batch.
|
||||
@param space tablespace
|
||||
@param count number of pages submitted for reading */
|
||||
static ATTRIBUTE_NOINLINE
|
||||
void buf_read_ahead_report(const fil_space_t &space, size_t count) noexcept
|
||||
{
|
||||
if (THD *thd= current_thd)
|
||||
if (trx_t *trx= thd_to_trx(thd))
|
||||
if (ha_handler_stats *stats= trx->active_handler_stats)
|
||||
stats->pages_prefetched+= count;
|
||||
mysql_mutex_lock(&buf_pool.mutex);
|
||||
/* Read ahead is considered one I/O operation for the purpose of
|
||||
LRU policy decision. */
|
||||
buf_LRU_stat_inc_io();
|
||||
buf_pool.stat.n_ra_pages_read_rnd+= count;
|
||||
mysql_mutex_unlock(&buf_pool.mutex);
|
||||
}
|
||||
|
||||
/** Applies a random read-ahead in buf_pool if there are at least a threshold
|
||||
value of accessed pages from the random read-ahead area. Does not read any
|
||||
page, not even the one at the position (space, offset), if the read-ahead
|
||||
@@ -371,18 +398,7 @@ read_ahead:
|
||||
}
|
||||
|
||||
if (count)
|
||||
{
|
||||
mariadb_increment_pages_prefetched(count);
|
||||
DBUG_PRINT("ib_buf", ("random read-ahead %zu pages from %s: %u",
|
||||
count, space->chain.start->name,
|
||||
low.page_no()));
|
||||
mysql_mutex_lock(&buf_pool.mutex);
|
||||
/* Read ahead is considered one I/O operation for the purpose of
|
||||
LRU policy decision. */
|
||||
buf_LRU_stat_inc_io();
|
||||
buf_pool.stat.n_ra_pages_read_rnd+= count;
|
||||
mysql_mutex_unlock(&buf_pool.mutex);
|
||||
}
|
||||
buf_read_ahead_report(*space, count);
|
||||
|
||||
space->release();
|
||||
buf_read_release(block);
|
||||
@@ -422,7 +438,8 @@ dberr_t buf_read_page(const page_id_t page_id,
|
||||
goto allocate_block;
|
||||
}
|
||||
|
||||
dberr_t err= buf_read_page_low(page_id, zip_size, chain, space, block, true);
|
||||
dberr_t err= buf_read_page_low(page_id, zip_size, chain, space, block,
|
||||
current_thd, true);
|
||||
buf_read_release(block);
|
||||
return err;
|
||||
}
|
||||
@@ -663,18 +680,7 @@ failed:
|
||||
}
|
||||
|
||||
if (count)
|
||||
{
|
||||
mariadb_increment_pages_prefetched(count);
|
||||
DBUG_PRINT("ib_buf", ("random read-ahead %zu pages from %s: %u",
|
||||
count, space->chain.start->name,
|
||||
new_low.page_no()));
|
||||
mysql_mutex_lock(&buf_pool.mutex);
|
||||
/* Read ahead is considered one I/O operation for the purpose of
|
||||
LRU policy decision. */
|
||||
buf_LRU_stat_inc_io();
|
||||
buf_pool.stat.n_ra_pages_read+= count;
|
||||
mysql_mutex_unlock(&buf_pool.mutex);
|
||||
}
|
||||
buf_read_ahead_report(*space, count);
|
||||
|
||||
space->release();
|
||||
buf_read_release(block);
|
||||
|
||||
@@ -52,6 +52,7 @@ Returns a new table, index, or space id. */
|
||||
void
|
||||
dict_hdr_get_new_id(
|
||||
/*================*/
|
||||
trx_t* trx, /*!< in/out: transaction */
|
||||
table_id_t* table_id, /*!< out: table id
|
||||
(not assigned if NULL) */
|
||||
index_id_t* index_id, /*!< out: index id
|
||||
@@ -60,7 +61,7 @@ dict_hdr_get_new_id(
|
||||
(not assigned if NULL) */
|
||||
{
|
||||
ib_id_t id;
|
||||
mtr_t mtr;
|
||||
mtr_t mtr{trx};
|
||||
|
||||
mtr.start();
|
||||
buf_block_t* dict_hdr = dict_hdr_get(&mtr);
|
||||
@@ -103,7 +104,7 @@ dberr_t dict_create()
|
||||
ulint root_page_no;
|
||||
|
||||
dberr_t err;
|
||||
mtr_t mtr;
|
||||
mtr_t mtr{nullptr};
|
||||
mtr.start();
|
||||
compile_time_assert(DICT_HDR_SPACE == 0);
|
||||
|
||||
@@ -198,7 +199,7 @@ dberr_t dict_boot()
|
||||
dict_table_t* table;
|
||||
dict_index_t* index;
|
||||
mem_heap_t* heap;
|
||||
mtr_t mtr;
|
||||
mtr_t mtr{nullptr};
|
||||
|
||||
static_assert(DICT_NUM_COLS__SYS_TABLES == 8, "compatibility");
|
||||
static_assert(DICT_NUM_FIELDS__SYS_TABLES == 10, "compatibility");
|
||||
|
||||
@@ -348,7 +348,10 @@ dict_build_table_def_step(
|
||||
ut_ad(!table->is_temporary());
|
||||
ut_ad(!table->space);
|
||||
ut_ad(table->space_id == UINT32_MAX);
|
||||
dict_hdr_get_new_id(&table->id, nullptr, nullptr);
|
||||
dict_hdr_get_new_id(thr_get_trx(thr), &table->id, nullptr,
|
||||
DICT_TF2_FLAG_IS_SET(table,
|
||||
DICT_TF2_USE_FILE_PER_TABLE)
|
||||
? &table->space_id : nullptr);
|
||||
|
||||
/* Always set this bit for all new created tables */
|
||||
DICT_TF2_FLAG_SET(table, DICT_TF2_FTS_AUX_HEX_NAME);
|
||||
@@ -358,8 +361,6 @@ dict_build_table_def_step(
|
||||
|
||||
ut_ad(DICT_TF_GET_ZIP_SSIZE(table->flags) == 0
|
||||
|| dict_table_has_atomic_blobs(table));
|
||||
/* Get a new tablespace ID */
|
||||
dict_hdr_get_new_id(NULL, NULL, &table->space_id);
|
||||
|
||||
DBUG_EXECUTE_IF(
|
||||
"ib_create_table_fail_out_of_space_ids",
|
||||
@@ -664,7 +665,7 @@ dict_build_index_def_step(
|
||||
ut_ad((UT_LIST_GET_LEN(table->indexes) > 0)
|
||||
|| dict_index_is_clust(index));
|
||||
|
||||
dict_hdr_get_new_id(NULL, &index->id, NULL);
|
||||
dict_hdr_get_new_id(trx, NULL, &index->id, NULL);
|
||||
|
||||
node->page_no = FIL_NULL;
|
||||
row = dict_create_sys_indexes_tuple(index, node->heap);
|
||||
@@ -696,7 +697,7 @@ dict_build_index_def(
|
||||
ut_ad((UT_LIST_GET_LEN(table->indexes) > 0)
|
||||
|| dict_index_is_clust(index));
|
||||
|
||||
dict_hdr_get_new_id(NULL, &index->id, NULL);
|
||||
dict_hdr_get_new_id(trx, NULL, &index->id, NULL);
|
||||
|
||||
/* Note that the index was created by this transaction. */
|
||||
index->trx_id = trx->id;
|
||||
@@ -724,12 +725,9 @@ dict_build_field_def_step(
|
||||
Creates an index tree for the index.
|
||||
@return DB_SUCCESS or DB_OUT_OF_FILE_SPACE */
|
||||
static MY_ATTRIBUTE((nonnull, warn_unused_result))
|
||||
dberr_t
|
||||
dict_create_index_tree_step(
|
||||
/*========================*/
|
||||
ind_node_t* node) /*!< in: index create node */
|
||||
dberr_t dict_create_index_tree_step(ind_node_t *node, trx_t *trx)
|
||||
{
|
||||
mtr_t mtr;
|
||||
mtr_t mtr{trx};
|
||||
btr_pcur_t pcur;
|
||||
dict_index_t* index;
|
||||
dtuple_t* search_tuple;
|
||||
@@ -810,9 +808,9 @@ dberr_t
|
||||
dict_create_index_tree_in_mem(
|
||||
/*==========================*/
|
||||
dict_index_t* index, /*!< in/out: index */
|
||||
const trx_t* trx) /*!< in: InnoDB transaction handle */
|
||||
trx_t* trx) /*!< in: InnoDB transaction handle */
|
||||
{
|
||||
mtr_t mtr;
|
||||
mtr_t mtr{trx};
|
||||
|
||||
ut_ad(dict_sys.locked());
|
||||
ut_ad(!(index->type & DICT_FTS));
|
||||
@@ -1249,7 +1247,7 @@ dict_create_index_step(
|
||||
|
||||
if (node->state == INDEX_CREATE_INDEX_TREE) {
|
||||
|
||||
err = dict_create_index_tree_step(node);
|
||||
err = dict_create_index_tree_step(node, trx);
|
||||
|
||||
DBUG_EXECUTE_IF("ib_dict_create_index_tree_fail",
|
||||
err = DB_OUT_OF_MEMORY;);
|
||||
|
||||
@@ -1205,9 +1205,6 @@ inline void dict_sys_t::add(dict_table_t *table) noexcept
|
||||
ut_ad(dict_lru_validate());
|
||||
}
|
||||
|
||||
#ifdef BTR_CUR_HASH_ADAPT
|
||||
TRANSACTIONAL_TARGET
|
||||
#endif
|
||||
/** Test whether a table can be evicted from dict_sys.table_LRU.
|
||||
@param table table to be considered for eviction
|
||||
@return whether the table can be evicted */
|
||||
@@ -2054,9 +2051,6 @@ dict_index_add_to_cache(
|
||||
return DB_SUCCESS;
|
||||
}
|
||||
|
||||
#ifdef BTR_CUR_HASH_ADAPT
|
||||
TRANSACTIONAL_TARGET
|
||||
#endif
|
||||
/**********************************************************************//**
|
||||
Removes an index from the dictionary cache. */
|
||||
static
|
||||
@@ -3889,10 +3883,10 @@ dict_print_info_on_foreign_keys(
|
||||
/**********************************************************************//**
|
||||
Flags an index corrupted both in the data dictionary cache
|
||||
and in the SYS_INDEXES */
|
||||
void dict_set_corrupted(dict_index_t *index, const char *ctx)
|
||||
void dict_set_corrupted(trx_t *trx, dict_index_t *index, const char *ctx)
|
||||
{
|
||||
mem_heap_t* heap;
|
||||
mtr_t mtr;
|
||||
mtr_t mtr{trx};
|
||||
dict_index_t* sys_index;
|
||||
dtuple_t* tuple;
|
||||
dfield_t* dfield;
|
||||
@@ -3981,15 +3975,17 @@ func_exit:
|
||||
}
|
||||
|
||||
/** Sets merge_threshold in the SYS_INDEXES
|
||||
@param[in] thd current_thd
|
||||
@param[in,out] index index
|
||||
@param[in] merge_threshold value to set */
|
||||
void
|
||||
dict_index_set_merge_threshold(
|
||||
const THD& thd,
|
||||
dict_index_t* index,
|
||||
ulint merge_threshold)
|
||||
{
|
||||
mem_heap_t* heap;
|
||||
mtr_t mtr;
|
||||
mtr_t mtr{thd_to_trx(&thd)};
|
||||
dict_index_t* sys_index;
|
||||
dtuple_t* tuple;
|
||||
dfield_t* dfield;
|
||||
|
||||
@@ -53,6 +53,7 @@ referenced table is pushed into the output stack (fk_tables), if it is not
|
||||
NULL. These tables must be subsequently loaded so that all the foreign
|
||||
key constraints are loaded into memory.
|
||||
|
||||
@param[in,out] mtr mini-transaction
|
||||
@param[in] name Table name in the db/tablename format
|
||||
@param[in] ignore_err Error to be ignored when loading table
|
||||
and its index definition
|
||||
@@ -61,7 +62,8 @@ key constraints are loaded into memory.
|
||||
constraints are loaded.
|
||||
@return table, possibly with file_unreadable flag set
|
||||
@retval nullptr if the table does not exist */
|
||||
static dict_table_t *dict_load_table_one(const span<const char> &name,
|
||||
static dict_table_t *dict_load_table_one(mtr_t &mtr,
|
||||
const span<const char> &name,
|
||||
dict_err_ignore_t ignore_err,
|
||||
dict_names_t &fk_tables);
|
||||
|
||||
@@ -665,7 +667,7 @@ dict_sys_tables_rec_read(
|
||||
rec, index, nullptr, true, ULINT_UNDEFINED, &heap);
|
||||
const rec_t* old_vers;
|
||||
row_vers_build_for_semi_consistent_read(
|
||||
nullptr, rec, mtr, index, &offsets, &heap,
|
||||
rec, mtr, index, &offsets, &heap,
|
||||
heap, &old_vers, nullptr);
|
||||
mtr->rollback_to_savepoint(savepoint);
|
||||
rec = old_vers;
|
||||
@@ -881,7 +883,7 @@ void dict_load_tablespaces(const std::set<uint32_t> *spaces, bool upgrade)
|
||||
{
|
||||
uint32_t max_space_id = 0;
|
||||
btr_pcur_t pcur;
|
||||
mtr_t mtr;
|
||||
mtr_t mtr{nullptr};
|
||||
|
||||
mtr.start();
|
||||
|
||||
@@ -1075,7 +1077,7 @@ err_len:
|
||||
rec, index, nullptr, true, ULINT_UNDEFINED, &heap);
|
||||
const rec_t* old_vers;
|
||||
row_vers_build_for_semi_consistent_read(
|
||||
nullptr, rec, mtr, index, &offsets, &heap,
|
||||
rec, mtr, index, &offsets, &heap,
|
||||
heap, &old_vers, nullptr);
|
||||
mtr->rollback_to_savepoint(savepoint);
|
||||
rec = old_vers;
|
||||
@@ -1290,6 +1292,7 @@ err_len:
|
||||
}
|
||||
|
||||
/** Load the definitions for table columns.
|
||||
@param mtr mini-transaction
|
||||
@param table table
|
||||
@param use_uncommitted 0=READ COMMITTED, 1=detect, 2=READ UNCOMMITTED
|
||||
@param heap memory heap for temporary storage
|
||||
@@ -1298,11 +1301,11 @@ err_len:
|
||||
@retval DB_SUCCESS_LOCKED_REC on success if use_uncommitted=1
|
||||
and instant ADD/DROP/reorder was detected */
|
||||
MY_ATTRIBUTE((nonnull, warn_unused_result))
|
||||
static dberr_t dict_load_columns(dict_table_t *table, unsigned use_uncommitted,
|
||||
static dberr_t dict_load_columns(mtr_t &mtr,
|
||||
dict_table_t *table, unsigned use_uncommitted,
|
||||
mem_heap_t *heap)
|
||||
{
|
||||
btr_pcur_t pcur;
|
||||
mtr_t mtr;
|
||||
ulint n_skipped = 0;
|
||||
|
||||
ut_ad(dict_sys.locked());
|
||||
@@ -1416,13 +1419,15 @@ func_exit:
|
||||
}
|
||||
|
||||
/** Loads SYS_VIRTUAL info for one virtual column
|
||||
@param mtr mini-transaction
|
||||
@param table table definition
|
||||
@param uncommitted false=READ COMMITTED, true=READ UNCOMMITTED
|
||||
@param nth_v_col virtual column position */
|
||||
MY_ATTRIBUTE((nonnull, warn_unused_result))
|
||||
static
|
||||
dberr_t
|
||||
dict_load_virtual_col(dict_table_t *table, bool uncommitted, ulint nth_v_col)
|
||||
dict_load_virtual_col(mtr_t &mtr, dict_table_t *table, bool uncommitted,
|
||||
ulint nth_v_col)
|
||||
{
|
||||
const dict_v_col_t* v_col = dict_table_get_nth_v_col(table, nth_v_col);
|
||||
|
||||
@@ -1432,7 +1437,6 @@ dict_load_virtual_col(dict_table_t *table, bool uncommitted, ulint nth_v_col)
|
||||
|
||||
dict_index_t* sys_virtual_index;
|
||||
btr_pcur_t pcur;
|
||||
mtr_t mtr;
|
||||
|
||||
ut_ad(dict_sys.locked());
|
||||
|
||||
@@ -1502,13 +1506,15 @@ func_exit:
|
||||
}
|
||||
|
||||
/** Loads info from SYS_VIRTUAL for virtual columns.
|
||||
@param table table definition
|
||||
@param mtr mini-transaction
|
||||
@param table table definition
|
||||
@param uncommitted false=READ COMMITTED, true=READ UNCOMMITTED */
|
||||
MY_ATTRIBUTE((nonnull, warn_unused_result))
|
||||
static dberr_t dict_load_virtual(dict_table_t *table, bool uncommitted)
|
||||
static dberr_t dict_load_virtual(mtr_t &mtr, dict_table_t *table,
|
||||
bool uncommitted)
|
||||
{
|
||||
for (ulint i= 0; i < table->n_v_cols; i++)
|
||||
if (dberr_t err= dict_load_virtual_col(table, uncommitted, i))
|
||||
if (dberr_t err= dict_load_virtual_col(mtr, table, uncommitted, i))
|
||||
return err;
|
||||
return DB_SUCCESS;
|
||||
}
|
||||
@@ -1632,7 +1638,7 @@ err_len:
|
||||
rec, sys_field, nullptr, true, ULINT_UNDEFINED, &heap);
|
||||
const rec_t* old_vers;
|
||||
row_vers_build_for_semi_consistent_read(
|
||||
nullptr, rec, mtr, sys_field, &offsets, &heap,
|
||||
rec, mtr, sys_field, &offsets, &heap,
|
||||
heap, &old_vers, nullptr);
|
||||
mtr->rollback_to_savepoint(savepoint);
|
||||
rec = old_vers;
|
||||
@@ -1668,20 +1674,19 @@ err_len:
|
||||
|
||||
/**
|
||||
Load definitions for index fields.
|
||||
@param mtr mini-transaction
|
||||
@param index index whose fields are to be loaded
|
||||
@param uncommitted false=READ COMMITTED, true=READ UNCOMMITTED
|
||||
@param heap memory heap for temporary storage
|
||||
@return error code
|
||||
@return DB_SUCCESS if the fields were loaded successfully */
|
||||
static dberr_t dict_load_fields(dict_index_t *index, bool uncommitted,
|
||||
mem_heap_t *heap)
|
||||
static dberr_t dict_load_fields(mtr_t &mtr, dict_index_t *index,
|
||||
bool uncommitted, mem_heap_t *heap)
|
||||
{
|
||||
btr_pcur_t pcur;
|
||||
mtr_t mtr;
|
||||
|
||||
ut_ad(dict_sys.locked());
|
||||
|
||||
mtr.start();
|
||||
auto sp = mtr.get_savepoint();
|
||||
|
||||
dict_index_t* sys_index = dict_sys.sys_fields->indexes.start;
|
||||
ut_ad(!dict_sys.sys_fields->not_redundant());
|
||||
@@ -1737,7 +1742,7 @@ static dberr_t dict_load_fields(dict_index_t *index, bool uncommitted,
|
||||
}
|
||||
|
||||
func_exit:
|
||||
mtr.commit();
|
||||
mtr.rollback_to_savepoint(sp);
|
||||
return error;
|
||||
}
|
||||
|
||||
@@ -1849,7 +1854,7 @@ err_len:
|
||||
rec, sys_index, nullptr, true, ULINT_UNDEFINED, &heap);
|
||||
const rec_t* old_vers;
|
||||
row_vers_build_for_semi_consistent_read(
|
||||
nullptr, rec, mtr, sys_index, &offsets, &heap,
|
||||
rec, mtr, sys_index, &offsets, &heap,
|
||||
heap, &old_vers, nullptr);
|
||||
mtr->rollback_to_savepoint(savepoint);
|
||||
rec = old_vers;
|
||||
@@ -1919,6 +1924,7 @@ err_len:
|
||||
}
|
||||
|
||||
/** Load definitions for table indexes. Adds them to the data dictionary cache.
|
||||
@param mtr mini-transaction
|
||||
@param table table definition
|
||||
@param uncommitted false=READ COMMITTED, true=READ UNCOMMITTED
|
||||
@param heap memory heap for temporary storage
|
||||
@@ -1927,18 +1933,16 @@ err_len:
|
||||
@retval DB_SUCCESS if all indexes were successfully loaded
|
||||
@retval DB_CORRUPTION if corruption of dictionary table
|
||||
@retval DB_UNSUPPORTED if table has unknown index type */
|
||||
static MY_ATTRIBUTE((nonnull))
|
||||
dberr_t dict_load_indexes(dict_table_t *table, bool uncommitted,
|
||||
dberr_t dict_load_indexes(mtr_t *mtr, dict_table_t *table, bool uncommitted,
|
||||
mem_heap_t *heap, dict_err_ignore_t ignore_err)
|
||||
{
|
||||
dict_index_t* sys_index;
|
||||
btr_pcur_t pcur;
|
||||
byte table_id[8];
|
||||
mtr_t mtr;
|
||||
|
||||
ut_ad(dict_sys.locked());
|
||||
|
||||
mtr.start();
|
||||
mtr->start();
|
||||
|
||||
sys_index = dict_sys.sys_indexes->indexes.start;
|
||||
ut_ad(!dict_sys.sys_indexes->not_redundant());
|
||||
@@ -1960,7 +1964,7 @@ dberr_t dict_load_indexes(dict_table_t *table, bool uncommitted,
|
||||
pcur.btr_cur.page_cur.index = sys_index;
|
||||
|
||||
dberr_t error = btr_pcur_open_on_user_rec(&tuple, BTR_SEARCH_LEAF,
|
||||
&pcur, &mtr);
|
||||
&pcur, mtr);
|
||||
if (error != DB_SUCCESS) {
|
||||
goto func_exit;
|
||||
}
|
||||
@@ -1993,7 +1997,7 @@ dberr_t dict_load_indexes(dict_table_t *table, bool uncommitted,
|
||||
}
|
||||
|
||||
err_msg = dict_load_index_low(table_id, uncommitted, heap, rec,
|
||||
&mtr, table, &index);
|
||||
mtr, table, &index);
|
||||
ut_ad(!index == !!err_msg);
|
||||
|
||||
if (err_msg == dict_load_index_none) {
|
||||
@@ -2101,7 +2105,8 @@ corrupted:
|
||||
of the database server */
|
||||
dict_mem_index_free(index);
|
||||
} else {
|
||||
error = dict_load_fields(index, uncommitted, heap);
|
||||
error = dict_load_fields(*mtr, index, uncommitted,
|
||||
heap);
|
||||
if (error != DB_SUCCESS) {
|
||||
goto func_exit;
|
||||
}
|
||||
@@ -2131,7 +2136,7 @@ corrupted:
|
||||
#endif /* UNIV_DEBUG */
|
||||
}
|
||||
next_rec:
|
||||
btr_pcur_move_to_next_user_rec(&pcur, &mtr);
|
||||
btr_pcur_move_to_next_user_rec(&pcur, mtr);
|
||||
}
|
||||
|
||||
if (!dict_table_get_first_index(table)
|
||||
@@ -2160,7 +2165,7 @@ next_rec:
|
||||
}
|
||||
|
||||
func_exit:
|
||||
mtr.commit();
|
||||
mtr->commit();
|
||||
return error;
|
||||
}
|
||||
|
||||
@@ -2302,6 +2307,7 @@ referenced table is pushed into the output stack (fk_tables), if it is not
|
||||
NULL. These tables must be subsequently loaded so that all the foreign
|
||||
key constraints are loaded into memory.
|
||||
|
||||
@param[in,out] mtr mini-transaction
|
||||
@param[in] name Table name in the db/tablename format
|
||||
@param[in] ignore_err Error to be ignored when loading table
|
||||
and its index definition
|
||||
@@ -2310,12 +2316,12 @@ key constraints are loaded into memory.
|
||||
constraints are loaded.
|
||||
@return table, possibly with file_unreadable flag set
|
||||
@retval nullptr if the table does not exist */
|
||||
static dict_table_t *dict_load_table_one(const span<const char> &name,
|
||||
static dict_table_t *dict_load_table_one(mtr_t &mtr,
|
||||
const span<const char> &name,
|
||||
dict_err_ignore_t ignore_err,
|
||||
dict_names_t &fk_tables)
|
||||
{
|
||||
btr_pcur_t pcur;
|
||||
mtr_t mtr;
|
||||
|
||||
DBUG_ENTER("dict_load_table_one");
|
||||
DBUG_PRINT("dict_load_table_one",
|
||||
@@ -2392,7 +2398,7 @@ err_exit:
|
||||
|
||||
dict_load_tablespace(table, ignore_err);
|
||||
|
||||
switch (dict_load_columns(table, use_uncommitted, heap)) {
|
||||
switch (dict_load_columns(mtr, table, use_uncommitted, heap)) {
|
||||
case DB_SUCCESS_LOCKED_REC:
|
||||
ut_ad(!uncommitted);
|
||||
uncommitted = true;
|
||||
@@ -2400,7 +2406,7 @@ err_exit:
|
||||
mem_heap_free(heap);
|
||||
goto reload;
|
||||
case DB_SUCCESS:
|
||||
if (!dict_load_virtual(table, uncommitted)) {
|
||||
if (!dict_load_virtual(mtr, table, uncommitted)) {
|
||||
break;
|
||||
}
|
||||
/* fall through */
|
||||
@@ -2430,7 +2436,8 @@ err_exit:
|
||||
? DICT_ERR_IGNORE_ALL
|
||||
: ignore_err;
|
||||
|
||||
err = dict_load_indexes(table, uncommitted, heap, index_load_err);
|
||||
err = dict_load_indexes(&mtr, table, uncommitted, heap,
|
||||
index_load_err);
|
||||
|
||||
if (err == DB_TABLE_CORRUPT) {
|
||||
/* Refuse to load the table if the table has a corrupted
|
||||
@@ -2477,7 +2484,7 @@ corrupted:
|
||||
goto corrupted;
|
||||
}
|
||||
|
||||
err = btr_cur_instant_init(table);
|
||||
err = btr_cur_instant_init(&mtr, table);
|
||||
}
|
||||
} else {
|
||||
ut_ad(ignore_err & DICT_ERR_IGNORE_INDEX);
|
||||
@@ -2497,7 +2504,7 @@ corrupted:
|
||||
/* Don't attempt to load the indexes from disk. */
|
||||
} else if (err == DB_SUCCESS) {
|
||||
auto i = fk_tables.size();
|
||||
err = dict_load_foreigns(table->name.m_name, nullptr,
|
||||
err = dict_load_foreigns(mtr, table->name.m_name, nullptr,
|
||||
0, true, ignore_err, fk_tables);
|
||||
|
||||
if (err != DB_SUCCESS) {
|
||||
@@ -2549,13 +2556,15 @@ dict_table_t *dict_sys_t::load_table(const span<const char> &name,
if (dict_table_t *table= find_table(name))
return table;
dict_names_t fk_list;
dict_table_t *table= dict_load_table_one(name, ignore, fk_list);
THD* const thd{current_thd};
mtr_t mtr{thd ? thd_to_trx(thd) : nullptr};
dict_table_t *table= dict_load_table_one(mtr, name, ignore, fk_list);
while (!fk_list.empty())
{
const char *f= fk_list.front();
const span<const char> name{f, strlen(f)};
if (!find_table(name))
dict_load_table_one(name, ignore, fk_list);
dict_load_table_one(mtr, name, ignore, fk_list);
fk_list.pop_front();
}

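Note (editor's sketch): the load_table() hunk above drains fk_list as a work list, loading each referenced table that is not already cached and letting every load push further names onto the list. A minimal standalone illustration of that work-list pattern, with purely hypothetical names (this is not the InnoDB code):

// Hypothetical load_one() appends newly discovered dependencies to `pending`.
#include <deque>
#include <set>
#include <string>

void load_with_dependencies(const std::string &root,
                            void (*load_one)(const std::string &,
                                             std::deque<std::string> &))
{
  std::deque<std::string> pending{root};
  std::set<std::string> loaded;
  while (!pending.empty())
  {
    std::string name= pending.front();
    pending.pop_front();
    if (loaded.insert(name).second)   // skip names already handled
      load_one(name, pending);        // may push referenced names onto pending
  }
}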
@@ -2576,7 +2585,7 @@ dict_load_table_on_id(
|
||||
btr_pcur_t pcur;
|
||||
const byte* field;
|
||||
ulint len;
|
||||
mtr_t mtr;
|
||||
mtr_t mtr{nullptr};
|
||||
|
||||
ut_ad(dict_sys.locked());
|
||||
|
||||
@@ -2645,26 +2654,6 @@ check_rec:
|
||||
return table;
|
||||
}
|
||||
|
||||
/********************************************************************//**
|
||||
This function is called when the database is booted. Loads system table
|
||||
index definitions except for the clustered index which is added to the
|
||||
dictionary cache at booting before calling this function. */
|
||||
void
|
||||
dict_load_sys_table(
|
||||
/*================*/
|
||||
dict_table_t* table) /*!< in: system table */
|
||||
{
|
||||
mem_heap_t* heap;
|
||||
|
||||
ut_ad(dict_sys.locked());
|
||||
|
||||
heap = mem_heap_create(1000);
|
||||
|
||||
dict_load_indexes(table, false, heap, DICT_ERR_IGNORE_NONE);
|
||||
|
||||
mem_heap_free(heap);
|
||||
}
|
||||
|
||||
MY_ATTRIBUTE((nonnull, warn_unused_result))
|
||||
/********************************************************************//**
|
||||
Loads foreign key constraint col names (also for the referenced table).
|
||||
@@ -2676,10 +2665,10 @@ Members that will be created and set by this function:
|
||||
foreign->foreign_col_names[i]
|
||||
foreign->referenced_col_names[i]
|
||||
(for i=0..foreign->n_fields-1) */
|
||||
static dberr_t dict_load_foreign_cols(dict_foreign_t *foreign, trx_id_t trx_id)
|
||||
static dberr_t dict_load_foreign_cols(mtr_t &mtr, dict_foreign_t *foreign,
|
||||
trx_id_t trx_id)
|
||||
{
|
||||
btr_pcur_t pcur;
|
||||
mtr_t mtr;
|
||||
size_t id_len;
|
||||
|
||||
ut_ad(dict_sys.locked());
|
||||
@@ -2739,7 +2728,7 @@ static dberr_t dict_load_foreign_cols(dict_foreign_t *foreign, trx_id_t trx_id)
|
||||
&heap);
|
||||
const rec_t* old_vers;
|
||||
row_vers_build_for_semi_consistent_read(
|
||||
nullptr, rec, &mtr, sys_index, &offsets, &heap,
|
||||
rec, &mtr, sys_index, &offsets, &heap,
|
||||
heap, &old_vers, nullptr);
|
||||
mtr.rollback_to_savepoint(savepoint);
|
||||
rec = old_vers;
|
||||
@@ -2830,6 +2819,7 @@ static MY_ATTRIBUTE((warn_unused_result))
|
||||
dberr_t
|
||||
dict_load_foreign(
|
||||
/*==============*/
|
||||
mtr_t& mtr, /*!< in/out: mini-transaction*/
|
||||
const char* table_name, /*!< in: table name */
|
||||
bool uncommitted, /*!< in: use READ UNCOMMITTED
|
||||
transaction isolation level */
|
||||
@@ -2861,7 +2851,6 @@ dict_load_foreign(
|
||||
btr_pcur_t pcur;
|
||||
const byte* field;
|
||||
ulint len;
|
||||
mtr_t mtr;
|
||||
dict_table_t* for_table;
|
||||
dict_table_t* ref_table;
|
||||
|
||||
@@ -2928,7 +2917,7 @@ err_exit:
|
||||
rec, sys_index, nullptr, true, ULINT_UNDEFINED, &heap);
|
||||
const rec_t* old_vers;
|
||||
row_vers_build_for_semi_consistent_read(
|
||||
nullptr, rec, &mtr, sys_index, &offsets, &heap,
|
||||
rec, &mtr, sys_index, &offsets, &heap,
|
||||
heap, &old_vers, nullptr);
|
||||
mtr.rollback_to_savepoint(savepoint);
|
||||
rec = old_vers;
|
||||
@@ -2993,7 +2982,7 @@ err_exit:
|
||||
mem_heap_free(heap);
|
||||
}
|
||||
|
||||
err = dict_load_foreign_cols(foreign, trx_id);
|
||||
err = dict_load_foreign_cols(mtr, foreign, trx_id);
|
||||
if (err != DB_SUCCESS) {
|
||||
goto load_error;
|
||||
}
|
||||
@@ -3048,6 +3037,7 @@ cache, then it is added to the output parameter (fk_tables).
|
||||
@return DB_SUCCESS or error code */
|
||||
dberr_t
|
||||
dict_load_foreigns(
|
||||
mtr_t& mtr, /*!< in/out: mini-transaction*/
|
||||
const char* table_name, /*!< in: table name */
|
||||
const char** col_names, /*!< in: column names, or NULL
|
||||
to use table->col_names */
|
||||
@@ -3065,7 +3055,6 @@ dict_load_foreigns(
|
||||
foreign key constraints. */
|
||||
{
|
||||
btr_pcur_t pcur;
|
||||
mtr_t mtr;
|
||||
|
||||
DBUG_ENTER("dict_load_foreigns");
|
||||
|
||||
@@ -3166,7 +3155,7 @@ loop:
|
||||
/* Load the foreign constraint definition to the dictionary cache */
|
||||
|
||||
err = len < sizeof fk_id
|
||||
? dict_load_foreign(table_name, false, col_names, trx_id,
|
||||
? dict_load_foreign(mtr, table_name, false, col_names, trx_id,
|
||||
check_recursive, check_charsets,
|
||||
{fk_id, len}, ignore_err, fk_tables)
|
||||
: DB_CORRUPTION;
|
||||
|
||||
@@ -1288,13 +1288,11 @@ bool dict_table_t::deserialise_columns(const byte* metadata, ulint len)
}

/** Check if record in clustered index is historical row.
@param[in,out] mtr mini-transaction
@param[in] rec clustered row
@param[in] offsets offsets
@return true if row is historical */
bool
dict_index_t::vers_history_row(
const rec_t* rec,
const rec_offs* offsets)
bool dict_index_t::vers_history_row(const rec_t *rec, const rec_offs *offsets)
{
ut_ad(is_primary());

@@ -1312,11 +1310,13 @@ dict_index_t::vers_history_row(
}

/** Check if record in secondary index is historical row.
@param[in,out] mtr mini-transaction
@param[in] rec record in a secondary index
@param[out] history_row true if row is historical
@return true on error */
bool
dict_index_t::vers_history_row(
mtr_t* mtr,
const rec_t* rec,
bool &history_row)
{
@@ -1337,32 +1337,32 @@ dict_index_t::vers_history_row(
|
||||
insert into t1 values (1, 1);
|
||||
*/
|
||||
bool error = false;
|
||||
mem_heap_t* heap = NULL;
|
||||
dict_index_t* clust_index = NULL;
|
||||
rec_offs offsets_[REC_OFFS_NORMAL_SIZE];
|
||||
rec_offs* offsets = offsets_;
|
||||
rec_offs_init(offsets_);
|
||||
|
||||
mtr_t mtr;
|
||||
mtr.start();
|
||||
const auto sp = mtr->get_savepoint();
|
||||
|
||||
rec_t* clust_rec =
|
||||
row_get_clust_rec(BTR_SEARCH_LEAF, rec, this, &clust_index, &mtr);
|
||||
row_get_clust_rec(BTR_SEARCH_LEAF, rec, this, &clust_index, mtr);
|
||||
if (clust_rec) {
|
||||
mem_heap_t* heap = NULL;
|
||||
offsets = rec_get_offsets(clust_rec, clust_index, offsets,
|
||||
clust_index->n_core_fields,
|
||||
ULINT_UNDEFINED, &heap);
|
||||
|
||||
history_row = clust_index->vers_history_row(clust_rec, offsets);
|
||||
if (heap) {
|
||||
mem_heap_free(heap);
|
||||
}
|
||||
} else {
|
||||
ib::error() << "foreign constraints: secondary index is out of "
|
||||
"sync";
|
||||
ut_ad("secondary index is out of sync" == 0);
|
||||
error = true;
|
||||
}
|
||||
mtr.commit();
|
||||
if (heap) {
|
||||
mem_heap_free(heap);
|
||||
}
|
||||
|
||||
mtr->rollback_to_savepoint(sp);
|
||||
return(error);
|
||||
}
|
||||
|
||||
@@ -831,7 +831,7 @@ btr_estimate_number_of_different_key_vals(dict_index_t* index,
|
||||
ulint not_empty_flag = 0;
|
||||
ulint total_external_size = 0;
|
||||
uintmax_t add_on;
|
||||
mtr_t mtr;
|
||||
mtr_t mtr{nullptr};
|
||||
mem_heap_t* heap = NULL;
|
||||
rec_offs* offsets_rec = NULL;
|
||||
rec_offs* offsets_next_rec = NULL;
|
||||
@@ -1098,13 +1098,12 @@ Calculates new estimates for index statistics. This function is
relatively quick and is used to calculate transient statistics that
are not saved on disk. This was the only way to calculate statistics
before the Persistent Statistics feature was introduced.
@param trx transaction
@param index B-tree
@return error code
@retval DB_SUCCESS_LOCKED_REC if the table under bulk insert operation */
static
dberr_t
dict_stats_update_transient_for_index(
/*==================================*/
dict_index_t* index) /*!< in/out: index */
static dberr_t
dict_stats_update_transient_for_index(trx_t *trx, dict_index_t* index) noexcept
{
dberr_t err = DB_SUCCESS;
if (srv_force_recovery >= SRV_FORCE_NO_TRX_UNDO
@@ -1125,7 +1124,7 @@ dummy_empty:
|
||||
|| !index->table->space) {
|
||||
goto dummy_empty;
|
||||
} else {
|
||||
mtr_t mtr;
|
||||
mtr_t mtr{trx};
|
||||
|
||||
mtr.start();
|
||||
mtr_sx_lock_index(index, &mtr);
|
||||
@@ -1187,7 +1186,7 @@ invalid:
|
||||
return err;
|
||||
}
|
||||
|
||||
dberr_t dict_stats_update_transient(dict_table_t *table) noexcept
|
||||
dberr_t dict_stats_update_transient(trx_t *trx, dict_table_t *table) noexcept
|
||||
{
|
||||
ut_ad(!table->stats_mutex_is_owner());
|
||||
|
||||
@@ -1226,7 +1225,7 @@ dberr_t dict_stats_update_transient(dict_table_t *table) noexcept
|
||||
continue;
|
||||
}
|
||||
|
||||
err = dict_stats_update_transient_for_index(index);
|
||||
err = dict_stats_update_transient_for_index(trx, index);
|
||||
|
||||
sum_of_index_sizes += index->stat_index_size;
|
||||
}
|
||||
@@ -1815,6 +1814,7 @@ distinct records on the leaf page, when looking at the fist n_prefix
columns. Also calculate the number of external pages pointed by records
on the leaf page.
@param[in] cur cursor
@param[in,out] mtr mini-transaction
@param[in] n_prefix look at the first n_prefix columns
when comparing records
@param[out] n_diff number of distinct records
@@ -1824,6 +1824,7 @@ static
void
dict_stats_analyze_index_below_cur(
const btr_cur_t* cur,
mtr_t* mtr,
ulint n_prefix,
ib_uint64_t* n_diff,
ib_uint64_t* n_external_pages)
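Note (editor's sketch): the comment above describes counting distinct records on a sorted leaf page when only the first n_prefix columns are compared. A standalone sketch of that counting idea, assuming rows are already sorted and each row has at least n_prefix fields (illustrative, not the InnoDB page scan):

#include <cstddef>
#include <cstdint>
#include <vector>

uint64_t count_distinct_prefixes(const std::vector<std::vector<int>> &sorted_rows,
                                 size_t n_prefix)
{
  // Adjacent records are compared on the first n_prefix fields only,
  // so a single pass over the sorted rows is enough.
  uint64_t n_diff = sorted_rows.empty() ? 0 : 1;
  for (size_t i = 1; i < sorted_rows.size(); i++)
    for (size_t f = 0; f < n_prefix; f++)
      if (sorted_rows[i][f] != sorted_rows[i - 1][f]) { n_diff++; break; }
  return n_diff;
}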
@@ -1837,8 +1838,8 @@ dict_stats_analyze_index_below_cur(
|
||||
rec_offs* offsets2;
|
||||
rec_offs* offsets_rec;
|
||||
ulint size;
|
||||
mtr_t mtr;
|
||||
|
||||
const auto sp = mtr->get_savepoint();
|
||||
index = btr_cur_get_index(cur);
|
||||
|
||||
/* Allocate offsets for the record and the node pointer, for
|
||||
@@ -1878,15 +1879,13 @@ dict_stats_analyze_index_below_cur(
|
||||
function without analyzing any leaf pages */
|
||||
*n_external_pages = 0;
|
||||
|
||||
mtr_start(&mtr);
|
||||
|
||||
/* descend to the leaf level on the B-tree */
|
||||
for (;;) {
|
||||
dberr_t err;
|
||||
|
||||
block = buf_page_get_gen(page_id, zip_size,
|
||||
RW_S_LATCH, NULL, BUF_GET,
|
||||
&mtr, &err);
|
||||
mtr, &err);
|
||||
if (!block) {
|
||||
goto func_exit;
|
||||
}
|
||||
@@ -1912,17 +1911,14 @@ dict_stats_analyze_index_below_cur(
|
||||
ut_a(*n_diff > 0);
|
||||
|
||||
if (*n_diff == 1) {
|
||||
mtr_commit(&mtr);
|
||||
|
||||
/* page has all keys equal and the end of the page
|
||||
was reached by dict_stats_scan_page(), no need to
|
||||
descend to the leaf level */
|
||||
mem_heap_free(heap);
|
||||
/* can't get an estimate for n_external_pages here
|
||||
because we do not dive to the leaf level, assume no
|
||||
external pages (*n_external_pages was assigned to 0
|
||||
above). */
|
||||
return;
|
||||
goto func_exit;
|
||||
}
|
||||
/* else */
|
||||
|
||||
@@ -1957,7 +1953,7 @@ dict_stats_analyze_index_below_cur(
|
||||
#endif
|
||||
|
||||
func_exit:
|
||||
mtr_commit(&mtr);
|
||||
mtr->rollback_to_savepoint(sp);
|
||||
mem_heap_free(heap);
|
||||
}
|
||||
|
||||
@@ -2152,7 +2148,7 @@ dict_stats_analyze_index_for_n_prefix(
|
||||
ib_uint64_t n_external_pages;
|
||||
|
||||
dict_stats_analyze_index_below_cur(btr_pcur_get_btr_cur(&pcur),
|
||||
n_prefix,
|
||||
mtr, n_prefix,
|
||||
&n_diff_on_leaf_page,
|
||||
&n_external_pages);
|
||||
|
||||
@@ -2288,14 +2284,14 @@ members stat_n_diff_key_vals[], stat_n_sample_sizes[], stat_index_size and
|
||||
stat_n_leaf_pages. This function can be slow.
|
||||
@param[in] index index to analyze
|
||||
@return index stats */
|
||||
static index_stats_t dict_stats_analyze_index(dict_index_t* index)
|
||||
static index_stats_t dict_stats_analyze_index(trx_t *trx, dict_index_t* index)
|
||||
{
|
||||
bool level_is_analyzed;
|
||||
ulint n_uniq;
|
||||
ulint n_prefix;
|
||||
ib_uint64_t total_recs;
|
||||
ib_uint64_t total_pages;
|
||||
mtr_t mtr;
|
||||
mtr_t mtr{trx};
|
||||
index_stats_t result(index->n_uniq);
|
||||
DBUG_ENTER("dict_stats_analyze_index");
|
||||
|
||||
@@ -2594,7 +2590,7 @@ found_level:
|
||||
DBUG_RETURN(result);
|
||||
}
|
||||
|
||||
dberr_t dict_stats_update_persistent(dict_table_t *table) noexcept
|
||||
dberr_t dict_stats_update_persistent(trx_t *trx, dict_table_t *table) noexcept
|
||||
{
|
||||
dict_index_t* index;
|
||||
|
||||
@@ -2603,7 +2599,7 @@ dberr_t dict_stats_update_persistent(dict_table_t *table) noexcept
|
||||
DEBUG_SYNC_C("dict_stats_update_persistent");
|
||||
|
||||
if (trx_id_t bulk_trx_id = table->bulk_trx_id) {
|
||||
if (trx_sys.find(nullptr, bulk_trx_id, false)) {
|
||||
if (trx_sys.find(trx, bulk_trx_id, false)) {
|
||||
dict_stats_empty_table(table);
|
||||
return DB_SUCCESS_LOCKED_REC;
|
||||
}
|
||||
@@ -2627,7 +2623,7 @@ dberr_t dict_stats_update_persistent(dict_table_t *table) noexcept
|
||||
dict_stats_empty_index(index);
|
||||
table->stats_mutex_unlock();
|
||||
|
||||
index_stats_t stats = dict_stats_analyze_index(index);
|
||||
index_stats_t stats = dict_stats_analyze_index(trx, index);
|
||||
|
||||
if (stats.is_bulk_operation()) {
|
||||
dict_stats_empty_table(table);
|
||||
@@ -2668,7 +2664,7 @@ dberr_t dict_stats_update_persistent(dict_table_t *table) noexcept
|
||||
}
|
||||
|
||||
table->stats_mutex_unlock();
|
||||
stats = dict_stats_analyze_index(index);
|
||||
stats = dict_stats_analyze_index(trx, index);
|
||||
table->stats_mutex_lock();
|
||||
|
||||
if (stats.is_bulk_operation()) {
|
||||
@@ -2706,12 +2702,13 @@ dberr_t dict_stats_update_persistent(dict_table_t *table) noexcept
|
||||
return(DB_SUCCESS);
|
||||
}
|
||||
|
||||
dberr_t dict_stats_update_persistent_try(dict_table_t *table)
|
||||
dberr_t dict_stats_update_persistent_try(trx_t *trx, dict_table_t *table)
|
||||
noexcept
|
||||
{
|
||||
if (table->stats_is_persistent() &&
|
||||
dict_stats_persistent_storage_check(false) == SCHEMA_OK)
|
||||
{
|
||||
if (dberr_t err= dict_stats_update_persistent(table))
|
||||
if (dberr_t err= dict_stats_update_persistent(trx, table))
|
||||
return err;
|
||||
return dict_stats_save(table);
|
||||
}
|
||||
@@ -3517,12 +3514,7 @@ dberr_t dict_stats_fetch_from_ps(dict_table_t *table)
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*********************************************************************//**
|
||||
Fetches or calculates new estimates for index statistics. */
|
||||
void
|
||||
dict_stats_update_for_index(
|
||||
/*========================*/
|
||||
dict_index_t* index) /*!< in/out: index */
|
||||
void dict_stats_update_for_index(trx_t *trx, dict_index_t *index) noexcept
|
||||
{
|
||||
dict_table_t *const table= index->table;
|
||||
ut_ad(table->stat_initialized());
|
||||
@@ -3547,7 +3539,7 @@ dict_stats_update_for_index(
|
||||
table->name.basename(), index->name());
|
||||
break;
|
||||
case SCHEMA_OK:
|
||||
index_stats_t stats{dict_stats_analyze_index(index)};
|
||||
index_stats_t stats{dict_stats_analyze_index(trx, index)};
|
||||
table->stats_mutex_lock();
|
||||
index->stat_index_size = stats.index_size;
|
||||
index->stat_n_leaf_pages = stats.n_leaf_pages;
|
||||
@@ -3563,7 +3555,7 @@ dict_stats_update_for_index(
|
||||
return;
|
||||
}
|
||||
|
||||
dict_stats_update_transient_for_index(index);
|
||||
dict_stats_update_transient_for_index(trx, index);
|
||||
}
|
||||
|
||||
/** Execute DELETE FROM mysql.innodb_table_stats
|
||||
|
||||
@@ -118,18 +118,11 @@ static void dict_stats_recalc_pool_add(table_id_t id)
dict_stats_schedule_now();
}

#ifdef WITH_WSREP
/** Update the table modification counter and if necessary,
schedule new estimates for table and index statistics to be calculated.
@param[in,out] table persistent or temporary table
@param[in] thd current session */
void dict_stats_update_if_needed(dict_table_t *table, const trx_t &trx)
#else
/** Update the table modification counter and if necessary,
schedule new estimates for table and index statistics to be calculated.
@param[in,out] table persistent or temporary table */
void dict_stats_update_if_needed_func(dict_table_t *table)
#endif
@param[in,out] thd current session */
void dict_stats_update_if_needed(dict_table_t *table, trx_t &trx) noexcept
{
uint32_t stat{table->stat};

@@ -197,7 +190,7 @@ void dict_stats_update_if_needed_func(dict_table_t *table)

if (counter > threshold) {
/* this will reset table->stat_modified_counter to 0 */
dict_stats_update_transient(table);
dict_stats_update_transient(&trx, table);
}
}

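Note (editor's sketch): the hunk above keeps the rule that transient statistics are recalculated once the per-table modification counter exceeds a threshold. Below is a standalone sketch of such a counter/threshold trigger; the threshold policy shown here is an assumption for illustration, not the exact server formula:

#include <cstdint>

struct table_stats
{
  uint64_t n_rows = 0;
  uint64_t modified_counter = 0;

  // Returns true when statistics should be recalculated; resets the counter.
  bool note_modification()
  {
    modified_counter++;
    const uint64_t threshold = n_rows / 10 + 16;  // assumed policy: ~10% of rows
    if (modified_counter <= threshold)
      return false;
    modified_counter = 0;
    return true;
  }
};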
@@ -337,7 +330,7 @@ invalid_table_id:
|
||||
difftime(time(nullptr), table->stats_last_recalc) >= MIN_RECALC_INTERVAL;
|
||||
|
||||
const dberr_t err= update_now
|
||||
? dict_stats_update_persistent_try(table)
|
||||
? dict_stats_update_persistent_try(nullptr, table)
|
||||
: DB_SUCCESS_LOCKED_REC;
|
||||
|
||||
dict_table_close(table, thd, mdl);
|
||||
|
||||
@@ -908,7 +908,7 @@ static inline void fil_crypt_read_crypt_data(fil_space_t *space)
|
||||
return;
|
||||
|
||||
const ulint zip_size= space->zip_size();
|
||||
mtr_t mtr;
|
||||
mtr_t mtr{nullptr};
|
||||
mtr.start();
|
||||
if (buf_block_t* b= buf_page_get_gen(page_id_t{space->id, 0}, zip_size,
|
||||
RW_S_LATCH, nullptr,
|
||||
@@ -962,7 +962,7 @@ func_exit:
|
||||
fil_crypt_start_converting = true;
|
||||
mysql_mutex_unlock(&fil_crypt_threads_mutex);
|
||||
|
||||
mtr_t mtr;
|
||||
mtr_t mtr{nullptr};
|
||||
mtr.start();
|
||||
|
||||
/* 2 - get page 0 */
|
||||
@@ -1687,7 +1687,7 @@ fil_crypt_get_page_throttle(
|
||||
|
||||
if (offset % (zip_size ? zip_size : srv_page_size)
|
||||
&& DB_SUCCESS_LOCKED_REC
|
||||
!= fseg_page_is_allocated(space, offset)) {
|
||||
!= fseg_page_is_allocated(mtr, space, offset)) {
|
||||
/* page is already freed */
|
||||
return NULL;
|
||||
}
|
||||
@@ -1755,7 +1755,7 @@ fil_crypt_rotate_page(
|
||||
return;
|
||||
}
|
||||
|
||||
mtr_t mtr;
|
||||
mtr_t mtr{nullptr};
|
||||
mtr.start();
|
||||
if (buf_block_t* block = fil_crypt_get_page_throttle(state,
|
||||
offset, &mtr,
|
||||
@@ -1936,7 +1936,7 @@ fil_crypt_flush_space(
|
||||
}
|
||||
|
||||
/* update page 0 */
|
||||
mtr_t mtr;
|
||||
mtr_t mtr{nullptr};
|
||||
mtr.start();
|
||||
|
||||
if (buf_block_t* block = buf_page_get_gen(
|
||||
|
||||
@@ -1669,7 +1669,7 @@ fil_space_t *fil_space_t::drop(uint32_t id, pfs_os_file_t *detached_handle)
|
||||
}
|
||||
|
||||
/* Before deleting the file, persistently write a log record. */
|
||||
mtr_t mtr;
|
||||
mtr_t mtr{nullptr};
|
||||
mtr.start();
|
||||
mtr.log_file_op(FILE_DELETE, id, space->chain.start->name);
|
||||
mtr.commit_file(*space, nullptr);
|
||||
@@ -1943,7 +1943,7 @@ dberr_t fil_space_t::rename(const char *path, bool log, bool replace) noexcept
|
||||
}
|
||||
}
|
||||
|
||||
mtr_t mtr;
|
||||
mtr_t mtr{nullptr};
|
||||
mtr.start();
|
||||
mtr.log_file_op(FILE_RENAME, id, old_path, path);
|
||||
return mtr.commit_file(*this, path) ? DB_SUCCESS : DB_ERROR;
|
||||
@@ -1974,7 +1974,7 @@ fil_ibd_create(
|
||||
{
|
||||
pfs_os_file_t file;
|
||||
bool success;
|
||||
mtr_t mtr;
|
||||
mtr_t mtr{nullptr};
|
||||
bool has_data_dir = FSP_FLAGS_HAS_DATA_DIR(flags) != 0;
|
||||
|
||||
ut_ad(!is_system_tablespace(space_id));
|
||||
@@ -2674,7 +2674,7 @@ void fsp_flags_try_adjust(fil_space_t *space, uint32_t flags)
|
||||
if (!space->size || !space->get_size()) {
|
||||
return;
|
||||
}
|
||||
mtr_t mtr;
|
||||
mtr_t mtr{nullptr};
|
||||
mtr.start();
|
||||
if (buf_block_t* b = buf_page_get(
|
||||
page_id_t(space->id, 0), space->zip_size(),
|
||||
@@ -3101,7 +3101,7 @@ ATTRIBUTE_NOINLINE ATTRIBUTE_COLD void mtr_t::name_write() noexcept
|
||||
fil_system.named_spaces.push_back(*m_user_space);
|
||||
ut_ad(UT_LIST_GET_LEN(m_user_space->chain) == 1);
|
||||
|
||||
mtr_t mtr;
|
||||
mtr_t mtr{nullptr};
|
||||
mtr.start();
|
||||
mtr.log_file_op(FILE_MODIFY, m_user_space->id,
|
||||
UT_LIST_GET_FIRST(m_user_space->chain)->name);
|
||||
@@ -3114,7 +3114,7 @@ and write out FILE_MODIFY if needed, and write FILE_CHECKPOINT.
|
||||
@return current LSN */
|
||||
ATTRIBUTE_COLD lsn_t fil_names_clear(lsn_t lsn) noexcept
|
||||
{
|
||||
mtr_t mtr;
|
||||
mtr_t mtr{nullptr};
|
||||
|
||||
ut_ad(log_sys.latch_have_wr());
|
||||
ut_ad(lsn);
|
||||
|
||||
@@ -2546,7 +2546,7 @@ fseg_free_page_low(
|
||||
#ifdef BTR_CUR_HASH_ADAPT
|
||||
if (ahi) {
|
||||
btr_search_drop_page_hash_when_freed(
|
||||
page_id_t(space->id, offset));
|
||||
mtr, page_id_t(space->id, offset));
|
||||
}
|
||||
#endif /* BTR_CUR_HASH_ADAPT */
|
||||
|
||||
@@ -2672,27 +2672,28 @@ dberr_t fseg_free_page(fseg_header_t *seg_header, fil_space_t *space,
}

/** Determine whether a page is allocated.
@param mtr mini-transaction
@param space tablespace
@param page page number
@return error code
@retval DB_SUCCESS if the page is marked as free
@retval DB_SUCCESS_LOCKED_REC if the page is marked as allocated */
dberr_t fseg_page_is_allocated(fil_space_t *space, unsigned page)
dberr_t fseg_page_is_allocated(mtr_t *mtr, fil_space_t *space, unsigned page)
noexcept
{
mtr_t mtr;
uint32_t dpage= xdes_calc_descriptor_page(space->zip_size(), page);
const unsigned zip_size= space->zip_size();
dberr_t err= DB_SUCCESS;
const auto sp= mtr->get_savepoint();

mtr.start();
if (!space->is_owner())
mtr.x_lock_space(space);
mtr->x_lock_space(space);

if (page >= space->free_limit || page >= space->size_in_header);
else if (const buf_block_t *b=
buf_page_get_gen(page_id_t(space->id, dpage), space->zip_size(),
RW_S_LATCH, nullptr, BUF_GET_POSSIBLY_FREED,
&mtr, &err))
mtr, &err))
{
if (!dpage &&
(space->free_limit !=
@@ -2709,7 +2710,7 @@ dberr_t fseg_page_is_allocated(fil_space_t *space, unsigned page)
: DB_SUCCESS_LOCKED_REC;
}

mtr.commit();
mtr->rollback_to_savepoint(sp);
return err;
}

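Note (editor's sketch): with the new signature above, the caller owns the mini-transaction and fseg_page_is_allocated() only takes a savepoint inside it, returning DB_SUCCESS for a free page and DB_SUCCESS_LOCKED_REC for an allocated one. A hedged illustration of what a caller could look like under that contract; this is not a verbatim caller from the tree:

static bool page_is_in_use(fil_space_t *space, unsigned page_no)
{
  mtr_t mtr{nullptr};          // no associated user transaction
  mtr.start();
  // The function latches the descriptor page within our mini-transaction and
  // rolls back to its own savepoint before returning.
  const dberr_t err = fseg_page_is_allocated(&mtr, space, page_no);
  mtr.commit();
  return err == DB_SUCCESS_LOCKED_REC;   // "allocated" per the comment above
}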
@@ -2762,6 +2763,7 @@ fseg_free_extent(
|
||||
if the page is found in the pool and
|
||||
is hashed */
|
||||
btr_search_drop_page_hash_when_freed(
|
||||
mtr,
|
||||
page_id_t(space->id,
|
||||
first_page_in_extent + i));
|
||||
}
|
||||
@@ -3615,7 +3617,7 @@ public:
|
||||
dberr_t get_unused(uint16_t boffset, inode_info *unused) const
|
||||
{
|
||||
dberr_t err= DB_SUCCESS;
|
||||
buf_block_t *block= buf_pool.page_fix(page_id_t{0, 0}, &err,
|
||||
buf_block_t *block= buf_pool.page_fix(page_id_t{0, 0}, &err, nullptr,
|
||||
buf_pool_t::FIX_WAIT_READ);
|
||||
if (!block)
|
||||
return err;
|
||||
@@ -3633,7 +3635,7 @@ public:
|
||||
break;
|
||||
}
|
||||
|
||||
block= buf_pool.page_fix(page_id_t{0, addr.page}, &err,
|
||||
block= buf_pool.page_fix(page_id_t{0, addr.page}, &err, nullptr,
|
||||
buf_pool_t::FIX_WAIT_READ);
|
||||
if (!block)
|
||||
break;
|
||||
@@ -3693,7 +3695,7 @@ static dberr_t fsp_table_inodes_root(inode_info *inodes, uint32_t root)
|
||||
return DB_SUCCESS;
|
||||
|
||||
dberr_t err= DB_SUCCESS;
|
||||
buf_block_t *block= buf_pool.page_fix(page_id_t{0, root}, &err,
|
||||
buf_block_t *block= buf_pool.page_fix(page_id_t{0, root}, &err, nullptr,
|
||||
buf_pool_t::FIX_WAIT_READ);
|
||||
if (!block)
|
||||
return err;
|
||||
@@ -3780,7 +3782,7 @@ static dberr_t fsp_get_sys_used_segment(inode_info *inodes, mtr_t *mtr)
|
||||
buf_block_t *block= nullptr;
|
||||
/* Get TRX_SYS_FSEG_HEADER, TRX_SYS_DOUBLEWRITE_FSEG from
|
||||
TRX_SYS_PAGE */
|
||||
block= buf_pool.page_fix(page_id_t{0, TRX_SYS_PAGE_NO}, &err,
|
||||
block= buf_pool.page_fix(page_id_t{0, TRX_SYS_PAGE_NO}, &err, nullptr,
|
||||
buf_pool_t::FIX_WAIT_READ);
|
||||
if (!block)
|
||||
return err;
|
||||
@@ -3804,7 +3806,7 @@ static dberr_t fsp_get_sys_used_segment(inode_info *inodes, mtr_t *mtr)
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
block= buf_pool.page_fix(page_id_t{0, DICT_HDR_PAGE_NO}, &err,
|
||||
block= buf_pool.page_fix(page_id_t{0, DICT_HDR_PAGE_NO}, &err, nullptr,
|
||||
buf_pool_t::FIX_WAIT_READ);
|
||||
if (!block)
|
||||
return err;
|
||||
@@ -3818,7 +3820,7 @@ static dberr_t fsp_get_sys_used_segment(inode_info *inodes, mtr_t *mtr)
|
||||
return err;
|
||||
|
||||
block= buf_pool.page_fix(page_id_t{0, FSP_IBUF_HEADER_PAGE_NO},
|
||||
&err, buf_pool_t::FIX_WAIT_READ);
|
||||
&err, nullptr, buf_pool_t::FIX_WAIT_READ);
|
||||
if (!block)
|
||||
return err;
|
||||
if (!inodes->insert_seg(block->page.frame + PAGE_DATA))
|
||||
@@ -3834,7 +3836,7 @@ static dberr_t fsp_get_sys_used_segment(inode_info *inodes, mtr_t *mtr)
|
||||
if (rseg->space->id == 0)
|
||||
{
|
||||
block= buf_pool.page_fix(rseg->page_id(), &err,
|
||||
buf_pool_t::FIX_WAIT_READ);
|
||||
nullptr, buf_pool_t::FIX_WAIT_READ);
|
||||
if (!block)
|
||||
break;
|
||||
if (!inodes->insert_seg(block->page.frame + TRX_RSEG +
|
||||
@@ -3857,7 +3859,7 @@ static dberr_t fseg_inode_free(uint32_t page_no, uint16_t offset)
|
||||
{
|
||||
fil_space_t *space= fil_system.sys_space;
|
||||
dberr_t err= DB_SUCCESS;
|
||||
mtr_t mtr;
|
||||
mtr_t mtr{nullptr};
|
||||
mtr.start();
|
||||
mtr.x_lock_space(space);
|
||||
buf_block_t *iblock= buf_page_get_gen(page_id_t{0, page_no}, 0,
|
||||
@@ -3950,7 +3952,7 @@ dberr_t fil_space_t::garbage_collect(bool shutdown)
|
||||
|
||||
ut_a(id == 0);
|
||||
/* Collect all the used segment inode entries */
|
||||
mtr_t mtr;
|
||||
mtr_t mtr{nullptr};
|
||||
mtr.start();
|
||||
inode_info used_inodes, unused_inodes;
|
||||
dberr_t err= fsp_get_sys_used_segment(&used_inodes, &mtr);
|
||||
@@ -4272,7 +4274,7 @@ static dberr_t fseg_validate(fil_space_t *space,
|
||||
dict_index_t *index) noexcept
|
||||
{
|
||||
/* Validate all FSP list in system tablespace */
|
||||
mtr_t mtr;
|
||||
mtr_t mtr{nullptr};
|
||||
mtr.start();
|
||||
dberr_t err= fseg_validate_low(space, index, &mtr);
|
||||
mtr.commit();
|
||||
@@ -5240,7 +5242,7 @@ class SpaceDefragmenter final
|
||||
/** Collect the extent information from tablespace */
|
||||
dberr_t extract_extent_state() noexcept
|
||||
{
|
||||
mtr_t mtr;
|
||||
mtr_t mtr{nullptr};
|
||||
dberr_t err= DB_SUCCESS;
|
||||
uint32_t last_descr_page_no= 0;
|
||||
fil_space_t *space= fil_system.sys_space;
|
||||
@@ -5654,7 +5656,7 @@ err_exit:
|
||||
|
||||
dberr_t IndexDefragmenter::defragment(SpaceDefragmenter *space_defrag) noexcept
|
||||
{
|
||||
mtr_t mtr;
|
||||
mtr_t mtr{nullptr};
|
||||
mtr.start();
|
||||
dberr_t err= DB_SUCCESS;
|
||||
m_index.lock.x_lock(SRW_LOCK_CALL);
|
||||
@@ -5694,7 +5696,7 @@ dberr_t IndexDefragmenter::defragment(SpaceDefragmenter *space_defrag) noexcept
|
||||
@retval DB_CORRUPTION if any error encountered */
|
||||
static dberr_t user_tables_exists() noexcept
|
||||
{
|
||||
mtr_t mtr;
|
||||
mtr_t mtr{nullptr};
|
||||
btr_pcur_t pcur;
|
||||
dberr_t err= DB_SUCCESS;
|
||||
mtr.start();
|
||||
@@ -5779,7 +5781,7 @@ void fsp_system_tablespace_truncate(bool shutdown)
|
||||
}
|
||||
}
|
||||
|
||||
mtr_t mtr;
|
||||
mtr_t mtr{nullptr};
|
||||
mtr.start();
|
||||
mtr.x_lock_space(space);
|
||||
err= fsp_traverse_extents(space, &last_used_extent, &mtr);
|
||||
@@ -5932,7 +5934,7 @@ void fsp_shrink_temp_space()
|
||||
{
|
||||
uint32_t last_used_extent= 0;
|
||||
fil_space_t *space= fil_system.temp_space;
|
||||
mtr_t mtr;
|
||||
mtr_t mtr{nullptr};
|
||||
mtr.start();
|
||||
mtr.set_log_mode(MTR_LOG_NO_REDO);
|
||||
mtr.x_lock_space(space);
|
||||
|
||||
@@ -3307,7 +3307,7 @@ fts_add_doc_from_tuple(
|
||||
doc_id_t doc_id,
|
||||
const dtuple_t* tuple)
|
||||
{
|
||||
mtr_t mtr;
|
||||
mtr_t mtr{ftt->fts_trx->trx};
|
||||
fts_cache_t* cache = ftt->table->fts->cache;
|
||||
|
||||
ut_ad(cache->get_docs);
|
||||
@@ -3376,7 +3376,7 @@ fts_add_doc_by_id(
|
||||
fts_trx_table_t*ftt, /*!< in: FTS trx table */
|
||||
doc_id_t doc_id) /*!< in: doc id */
|
||||
{
|
||||
mtr_t mtr;
|
||||
mtr_t mtr{ftt->fts_trx->trx};
|
||||
mem_heap_t* heap;
|
||||
btr_pcur_t pcur;
|
||||
dict_table_t* table;
|
||||
@@ -3624,7 +3624,7 @@ fts_get_max_doc_id(
|
||||
dict_index_t* index;
|
||||
dict_field_t* dfield MY_ATTRIBUTE((unused)) = NULL;
|
||||
doc_id_t doc_id = 0;
|
||||
mtr_t mtr;
|
||||
mtr_t mtr{nullptr};
|
||||
btr_pcur_t pcur;
|
||||
|
||||
index = table->fts_doc_id_index;
|
||||
|
||||
@@ -2019,12 +2019,14 @@ rtr_rec_cal_increase(
}

/** Estimates the number of rows in a given area.
@param[in,out] trx transaction
@param[in] index index
@param[in] tuple range tuple containing mbr, may also be empty tuple
@param[in] mode search mode
@return estimated number of rows */
ha_rows
rtr_estimate_n_rows_in_range(
trx_t* trx,
dict_index_t* index,
const dtuple_t* tuple,
page_cur_mode_t mode)
@@ -2065,7 +2067,7 @@ rtr_estimate_n_rows_in_range(
* (range_mbr.ymax - range_mbr.ymin);

/* Get index root page. */
mtr_t mtr;
mtr_t mtr{trx};

mtr.start();
index->set_modified(mtr);

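Note (editor's sketch): the hunk above shows the estimate computing the range MBR area before fetching the index root page. The sketch below only illustrates the general area-ratio heuristic under simplified assumptions; it is not the actual R-tree root-page traversal:

#include <algorithm>

struct mbr { double xmin, ymin, xmax, ymax; };

static double area(const mbr &m)
{ return (m.xmax - m.xmin) * (m.ymax - m.ymin); }

// Scale the known row count by the fraction of the index MBR that the
// search MBR covers (capped at 1.0). Purely illustrative.
double estimate_rows(const mbr &index_mbr, const mbr &range_mbr, double n_rows)
{
  const double index_area = area(index_mbr);
  if (index_area <= 0)
    return n_rows;                       // degenerate index MBR: no scaling
  const double ratio = std::min(1.0, area(range_mbr) / index_area);
  return n_rows * ratio;
}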
@@ -109,8 +109,6 @@ extern my_bool opt_readonly;
|
||||
#include "fil0pagecompress.h"
|
||||
#include "ut0mem.h"
|
||||
#include "row0ext.h"
|
||||
#include "mariadb_stats.h"
|
||||
simple_thread_local ha_handler_stats *mariadb_stats;
|
||||
|
||||
#include "lz4.h"
|
||||
#include "lzo/lzo1x.h"
|
||||
@@ -982,8 +980,7 @@ static SHOW_VAR innodb_status_variables[]= {
|
||||
{"buffer_pool_read_ahead", &buf_pool.stat.n_ra_pages_read, SHOW_SIZE_T},
|
||||
{"buffer_pool_read_ahead_evicted",
|
||||
&buf_pool.stat.n_ra_pages_evicted, SHOW_SIZE_T},
|
||||
{"buffer_pool_read_requests",
|
||||
&export_vars.innodb_buffer_pool_read_requests, SHOW_SIZE_T},
|
||||
{"buffer_pool_read_requests", &buf_pool.stat.n_page_gets, SHOW_SIZE_T},
|
||||
{"buffer_pool_reads", &buf_pool.stat.n_pages_read, SHOW_SIZE_T},
|
||||
{"buffer_pool_wait_free", &buf_pool.stat.LRU_waits, SHOW_SIZE_T},
|
||||
{"buffer_pool_write_requests", &buf_pool.flush_list_requests, SHOW_SIZE_T},
|
||||
@@ -1471,7 +1468,6 @@ static void innodb_drop_database(handlerton*, char *path)
|
||||
trx->commit();
|
||||
|
||||
row_mysql_unlock_data_dictionary(trx);
|
||||
trx->free();
|
||||
if (!stats_failed)
|
||||
stats.close();
|
||||
|
||||
@@ -1496,7 +1492,7 @@ static void innodb_drop_database(handlerton*, char *path)
|
||||
dfield_set_data(&dfield, namebuf, len);
|
||||
dict_index_copy_types(&tuple, sys_index, 1);
|
||||
std::vector<pfs_os_file_t> to_close;
|
||||
mtr_t mtr;
|
||||
mtr_t mtr{trx};
|
||||
mtr.start();
|
||||
pcur.btr_cur.page_cur.index = sys_index;
|
||||
err= btr_pcur_open_on_user_rec(&tuple, BTR_SEARCH_LEAF, &pcur, &mtr);
|
||||
@@ -1554,6 +1550,7 @@ static void innodb_drop_database(handlerton*, char *path)
|
||||
log_write_up_to(mtr.commit_lsn(), true);
|
||||
}
|
||||
|
||||
trx->free();
|
||||
my_free(namebuf);
|
||||
}
|
||||
|
||||
@@ -1775,12 +1772,12 @@ const char *thd_innodb_tmpdir(THD *thd)
return(tmp_dir);
}

/** Obtain the InnoDB transaction of a MySQL thread.
@param[in,out] thd thread handle
@return reference to transaction pointer */
static trx_t* thd_to_trx(THD* thd)
/** Obtain the InnoDB transaction of a MariaDB thread handle.
@param thd current_thd
@return InnoDB transaction */
trx_t *thd_to_trx(const THD *thd) noexcept
{
return reinterpret_cast<trx_t*>(thd_get_ha_data(thd, innodb_hton_ptr));
return static_cast<trx_t*>(thd_get_ha_data(thd, innodb_hton_ptr));
}

#ifdef WITH_WSREP
@@ -1923,8 +1920,8 @@ We will attempt to drop the tables here. */
|
||||
static void drop_garbage_tables_after_restore()
|
||||
{
|
||||
btr_pcur_t pcur;
|
||||
mtr_t mtr;
|
||||
trx_t *trx= trx_create();
|
||||
mtr_t mtr{trx};
|
||||
|
||||
ut_ad(!purge_sys.enabled());
|
||||
ut_d(purge_sys.stop_FTS());
|
||||
@@ -5747,7 +5744,7 @@ dberr_t ha_innobase::statistics_init(dict_table_t *table, bool recalc)
|
||||
if (recalc)
|
||||
{
|
||||
recalc:
|
||||
err= dict_stats_update_persistent(table);
|
||||
err= dict_stats_update_persistent(m_prebuilt->trx, table);
|
||||
if (err == DB_SUCCESS)
|
||||
err= dict_stats_save(table);
|
||||
}
|
||||
@@ -5781,7 +5778,7 @@ dberr_t ha_innobase::statistics_init(dict_table_t *table, bool recalc)
|
||||
}
|
||||
}
|
||||
|
||||
dict_stats_update_transient(table);
|
||||
dict_stats_update_transient(m_prebuilt->trx, table);
|
||||
}
|
||||
|
||||
return err;
|
||||
@@ -7695,6 +7692,16 @@ int ha_innobase::is_valid_trx(bool altering_to_supported) const noexcept
return HA_ERR_ROLLBACK;
}

namespace{
struct mariadb_set_stats
{
trx_t *const trx;
mariadb_set_stats(trx_t *trx, ha_handler_stats *stats) : trx(trx)
{ trx->active_handler_stats= stats && stats->active ? stats : nullptr; }
~mariadb_set_stats() { trx->active_handler_stats= nullptr; }
};
}

/********************************************************************//**
Stores a row in an InnoDB database, to the table specified in this
handle.
@@ -7711,12 +7718,13 @@ ha_innobase::write_row(
#endif
int error_result = 0;
bool auto_inc_used = false;
mariadb_set_stats set_stats_temporary(handler_stats);

DBUG_ENTER("ha_innobase::write_row");

trx_t* trx = thd_to_trx(m_user_thd);

mariadb_set_stats temp(trx, handler_stats);

/* Validation checks before we commence write_row operation. */
if (int err = is_valid_trx()) {
DBUG_RETURN(err);
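Note (editor's sketch): mariadb_set_stats above is an RAII guard that publishes the handler statistics pointer on the transaction for the duration of one handler call and clears it on every exit path. A generic standalone illustration of the same pattern (the names here are not InnoDB types):

struct stats_sink { bool active; };

struct session
{
  stats_sink *active_stats = nullptr;
};

class scoped_stats
{
  session &s;
public:
  scoped_stats(session &sess, stats_sink *sink) : s(sess)
  { s.active_stats = sink && sink->active ? sink : nullptr; }
  ~scoped_stats() { s.active_stats = nullptr; }   // cleared on every exit path
};

int do_row_operation(session &s, stats_sink *sink)
{
  scoped_stats guard(s, sink);   // stats collection enabled only in this scope
  // ... perform the operation; any early return still resets active_stats ...
  return 0;
}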
@@ -8496,7 +8504,6 @@ ha_innobase::update_row(
|
||||
|
||||
dberr_t error;
|
||||
trx_t* trx = thd_to_trx(m_user_thd);
|
||||
mariadb_set_stats set_stats_temporary(handler_stats);
|
||||
|
||||
DBUG_ENTER("ha_innobase::update_row");
|
||||
|
||||
@@ -8526,6 +8533,8 @@ ha_innobase::update_row(
|
||||
}
|
||||
}
|
||||
|
||||
mariadb_set_stats temp(trx, handler_stats);
|
||||
|
||||
upd_t* uvect = row_get_prebuilt_update_vector(m_prebuilt);
|
||||
ib_uint64_t autoinc;
|
||||
|
||||
@@ -8599,7 +8608,8 @@ ha_innobase::update_row(
|
||||
if any INSERT actually used (and wrote to
|
||||
PAGE_ROOT_AUTO_INC) a value bigger than our
|
||||
autoinc. */
|
||||
btr_write_autoinc(dict_table_get_first_index(
|
||||
btr_write_autoinc(m_prebuilt->trx,
|
||||
dict_table_get_first_index(
|
||||
m_prebuilt->table),
|
||||
autoinc);
|
||||
}
|
||||
@@ -8616,7 +8626,7 @@ func_exit:
|
||||
}
|
||||
|
||||
#ifdef WITH_WSREP
|
||||
if (error == DB_SUCCESS &&
|
||||
if (!err &&
|
||||
/* For sequences, InnoDB transaction may not have been started yet.
|
||||
Check THD-level wsrep state in that case. */
|
||||
(trx->is_wsrep()
|
||||
@@ -8660,7 +8670,6 @@ ha_innobase::delete_row(
|
||||
{
|
||||
dberr_t error;
|
||||
trx_t* trx = thd_to_trx(m_user_thd);
|
||||
mariadb_set_stats set_stats_temporary(handler_stats);
|
||||
|
||||
DBUG_ENTER("ha_innobase::delete_row");
|
||||
|
||||
@@ -8681,6 +8690,7 @@ ha_innobase::delete_row(
|
||||
trx->fts_next_doc_id = 0;
|
||||
|
||||
ut_ad(!trx->is_bulk_insert());
|
||||
mariadb_set_stats temp(trx, handler_stats);
|
||||
error = row_update_for_mysql(m_prebuilt);
|
||||
|
||||
ut_ad(error != DB_DUPLICATE_KEY);
|
||||
@@ -8925,7 +8935,6 @@ ha_innobase::index_read(
|
||||
enum ha_rkey_function find_flag)/*!< in: search flags from my_base.h */
|
||||
{
|
||||
DBUG_ENTER("index_read");
|
||||
mariadb_set_stats set_stats_temporary(handler_stats);
|
||||
DEBUG_SYNC_C("ha_innobase_index_read_begin");
|
||||
|
||||
ut_ad(m_prebuilt->trx == thd_to_trx(m_user_thd));
|
||||
@@ -8999,6 +9008,7 @@ ha_innobase::index_read(
|
||||
DBUG_RETURN(HA_ERR_UNSUPPORTED);
|
||||
}
|
||||
|
||||
mariadb_set_stats temp(m_prebuilt->trx, handler_stats);
|
||||
dberr_t ret =
|
||||
row_search_mvcc(buf, mode, m_prebuilt, m_last_match_mode, 0);
|
||||
|
||||
@@ -9014,7 +9024,7 @@ ha_innobase::index_read(
|
||||
switch (ret) {
|
||||
case DB_TABLESPACE_DELETED:
|
||||
ib_senderrf(
|
||||
m_prebuilt->trx->mysql_thd, IB_LOG_LEVEL_ERROR,
|
||||
m_user_thd, IB_LOG_LEVEL_ERROR,
|
||||
ER_TABLESPACE_DISCARDED,
|
||||
table->s->table_name.str);
|
||||
DBUG_RETURN(HA_ERR_TABLESPACE_MISSING);
|
||||
@@ -9023,7 +9033,7 @@ ha_innobase::index_read(
|
||||
DBUG_RETURN(HA_ERR_KEY_NOT_FOUND);
|
||||
case DB_TABLESPACE_NOT_FOUND:
|
||||
ib_senderrf(
|
||||
m_prebuilt->trx->mysql_thd, IB_LOG_LEVEL_ERROR,
|
||||
m_user_thd, IB_LOG_LEVEL_ERROR,
|
||||
ER_TABLESPACE_MISSING,
|
||||
table->s->table_name.str);
|
||||
DBUG_RETURN(HA_ERR_TABLESPACE_MISSING);
|
||||
@@ -9224,8 +9234,7 @@ ha_innobase::general_fetch(
|
||||
{
|
||||
DBUG_ENTER("general_fetch");
|
||||
|
||||
mariadb_set_stats set_stats_temporary(handler_stats);
|
||||
const trx_t* trx = m_prebuilt->trx;
|
||||
trx_t* trx = m_prebuilt->trx;
|
||||
|
||||
ut_ad(trx == thd_to_trx(m_user_thd));
|
||||
|
||||
@@ -9251,6 +9260,7 @@ ha_innobase::general_fetch(
|
||||
|
||||
int error;
|
||||
|
||||
mariadb_set_stats temp(trx, handler_stats);
|
||||
switch (dberr_t ret = row_search_mvcc(buf, PAGE_CUR_UNSUPP, m_prebuilt,
|
||||
match_mode, direction)) {
|
||||
case DB_SUCCESS:
|
||||
@@ -9725,7 +9735,6 @@ ha_innobase::ft_read(
|
||||
uchar* buf) /*!< in/out: buf contain result row */
|
||||
{
|
||||
row_prebuilt_t* ft_prebuilt;
|
||||
mariadb_set_stats set_stats_temporary(handler_stats);
|
||||
|
||||
ft_prebuilt = reinterpret_cast<NEW_FT_INFO*>(ft_handler)->ft_prebuilt;
|
||||
|
||||
@@ -9799,6 +9808,7 @@ next_record:
|
||||
tuple, index, &search_doc_id);
|
||||
|
||||
if (ret == DB_SUCCESS) {
|
||||
mariadb_set_stats temp(m_prebuilt->trx, handler_stats);
|
||||
ret = row_search_mvcc(
|
||||
buf, PAGE_CUR_GE, m_prebuilt,
|
||||
ROW_SEL_EXACT, 0);
|
||||
@@ -11889,7 +11899,7 @@ innobase_parse_hint_from_comment(
|
||||
/* GEN_CLUST_INDEX should use
|
||||
merge_threshold_table */
|
||||
dict_index_set_merge_threshold(
|
||||
index, merge_threshold_table);
|
||||
*thd, index, merge_threshold_table);
|
||||
continue;
|
||||
}
|
||||
|
||||
@@ -11904,7 +11914,7 @@ innobase_parse_hint_from_comment(
|
||||
if (key_info->name.streq(index_name)) {
|
||||
|
||||
dict_index_set_merge_threshold(
|
||||
index,
|
||||
*thd, index,
|
||||
merge_threshold_index[i]);
|
||||
is_found[i] = true;
|
||||
break;
|
||||
@@ -12865,7 +12875,8 @@ int create_table_info_t::create_table(bool create_fk, bool strict)
|
||||
|
||||
/* Check that also referencing constraints are ok */
|
||||
dict_names_t fk_tables;
|
||||
err = dict_load_foreigns(m_table_name, nullptr,
|
||||
mtr_t mtr{m_trx};
|
||||
err = dict_load_foreigns(mtr, m_table_name, nullptr,
|
||||
m_trx->id, true,
|
||||
ignore_err, fk_tables);
|
||||
while (err == DB_SUCCESS && !fk_tables.empty()) {
|
||||
@@ -13154,7 +13165,7 @@ bool create_table_info_t::row_size_is_acceptable(
|
||||
}
|
||||
|
||||
void create_table_info_t::create_table_update_dict(dict_table_t *table,
|
||||
THD *thd,
|
||||
trx_t *trx,
|
||||
const HA_CREATE_INFO &info,
|
||||
const TABLE &t)
|
||||
{
|
||||
@@ -13177,7 +13188,7 @@ void create_table_info_t::create_table_update_dict(dict_table_t *table,
|
||||
|
||||
/* Load server stopword into FTS cache */
|
||||
if (table->flags2 & DICT_TF2_FTS &&
|
||||
innobase_fts_load_stopword(table, nullptr, thd))
|
||||
innobase_fts_load_stopword(table, nullptr, trx->mysql_thd))
|
||||
fts_optimize_add_table(table);
|
||||
|
||||
if (const Field *ai = t.found_next_number_field)
|
||||
@@ -13199,13 +13210,13 @@ void create_table_info_t::create_table_update_dict(dict_table_t *table,
|
||||
/* Persist the "last used" value, which typically is AUTO_INCREMENT - 1.
|
||||
In btr_create(), the value 0 was already written. */
|
||||
if (--autoinc)
|
||||
btr_write_autoinc(dict_table_get_first_index(table), autoinc);
|
||||
btr_write_autoinc(trx, dict_table_get_first_index(table), autoinc);
|
||||
}
|
||||
|
||||
table->autoinc_mutex.wr_unlock();
|
||||
}
|
||||
|
||||
innobase_parse_hint_from_comment(thd, table, t.s);
|
||||
innobase_parse_hint_from_comment(trx->mysql_thd, table, t.s);
|
||||
}
|
||||
|
||||
/** Allocate a new trx. */
|
||||
@@ -13214,6 +13225,7 @@ create_table_info_t::allocate_trx()
|
||||
{
|
||||
m_trx = innobase_trx_allocate(m_thd);
|
||||
m_trx->will_lock = true;
|
||||
m_trx->mysql_thd= m_thd;
|
||||
}
|
||||
|
||||
/** Create a new table to an InnoDB database.
|
||||
@@ -13273,7 +13285,7 @@ ha_innobase::create(const char *name, TABLE *form, HA_CREATE_INFO *create_info,
|
||||
trx->commit(deleted);
|
||||
ut_ad(deleted.empty());
|
||||
info.table()->acquire();
|
||||
info.create_table_update_dict(info.table(), info.thd(),
|
||||
info.create_table_update_dict(info.table(), trx,
|
||||
*create_info, *form);
|
||||
}
|
||||
|
||||
@@ -13418,7 +13430,8 @@ ha_innobase::discard_or_import_tablespace(
|
||||
|
||||
dict_table_t* t = m_prebuilt->table;
|
||||
|
||||
if (dberr_t ret = dict_stats_update_persistent_try(t)) {
|
||||
if (dberr_t ret =
|
||||
dict_stats_update_persistent_try(m_prebuilt->trx, t)) {
|
||||
push_warning_printf(
|
||||
ha_thd(),
|
||||
Sql_condition::WARN_LEVEL_WARN,
|
||||
@@ -13522,7 +13535,7 @@ int ha_innobase::delete_table(const char *name)
|
||||
{
|
||||
dict_sys.unlock();
|
||||
parent_trx->mod_tables.erase(table); /* CREATE...SELECT error handling */
|
||||
btr_drop_temporary_table(*table);
|
||||
btr_drop_temporary_table(parent_trx, *table);
|
||||
dict_sys.lock(SRW_LOCK_CALL);
|
||||
dict_sys.remove(table);
|
||||
dict_sys.unlock();
|
||||
@@ -13833,7 +13846,6 @@ static dberr_t innobase_rename_table(trx_t *trx, const char *from,
|
||||
@retval 0 on success */
|
||||
int ha_innobase::truncate()
|
||||
{
|
||||
mariadb_set_stats set_stats_temporary(handler_stats);
|
||||
DBUG_ENTER("ha_innobase::truncate");
|
||||
|
||||
update_thd();
|
||||
@@ -13880,7 +13892,7 @@ int ha_innobase::truncate()
|
||||
if (ib_table->is_temporary())
|
||||
{
|
||||
info.options|= HA_LEX_CREATE_TMP_TABLE;
|
||||
btr_drop_temporary_table(*ib_table);
|
||||
btr_drop_temporary_table(trx, *ib_table);
|
||||
m_prebuilt->table= nullptr;
|
||||
row_prebuilt_free(m_prebuilt);
|
||||
m_prebuilt= nullptr;
|
||||
@@ -14011,7 +14023,7 @@ int ha_innobase::truncate()
|
||||
trx->commit(deleted);
|
||||
m_prebuilt->table->acquire();
|
||||
create_table_info_t::create_table_update_dict(m_prebuilt->table,
|
||||
m_user_thd, info, *table);
|
||||
trx, info, *table);
|
||||
}
|
||||
else
|
||||
{
|
||||
@@ -14021,8 +14033,11 @@ int ha_innobase::truncate()
|
||||
m_prebuilt->table->def_trx_id= def_trx_id;
|
||||
}
|
||||
dict_names_t fk_tables;
|
||||
dict_load_foreigns(m_prebuilt->table->name.m_name, nullptr, 1, true,
|
||||
DICT_ERR_IGNORE_FK_NOKEY, fk_tables);
|
||||
{
|
||||
mtr_t mtr{trx};
|
||||
dict_load_foreigns(mtr, m_prebuilt->table->name.m_name, nullptr, 1, true,
|
||||
DICT_ERR_IGNORE_FK_NOKEY, fk_tables);
|
||||
}
|
||||
for (const char *f : fk_tables)
|
||||
dict_sys.load_table({f, strlen(f)});
|
||||
}
|
||||
@@ -14320,7 +14335,9 @@ ha_innobase::records_in_range(
|
||||
index due to inconsistency between MySQL and InoDB dictionary info.
|
||||
Necessary message should have been printed in innobase_get_index() */
|
||||
if (!index || !m_prebuilt->table->space) {
|
||||
goto func_exit;
|
||||
func_exit:
|
||||
m_prebuilt->trx->op_info = "";
|
||||
DBUG_RETURN((ha_rows) n_rows);
|
||||
}
|
||||
if (index->is_corrupted()) {
|
||||
n_rows = HA_ERR_INDEX_CORRUPT;
|
||||
@@ -14342,7 +14359,10 @@ ha_innobase::records_in_range(
|
||||
mode1 = PAGE_CUR_GE;
|
||||
dtuple_set_n_fields(range_start, 0);
|
||||
} else if (convert_search_mode_to_innobase(min_key->flag, mode1)) {
|
||||
goto unsupported;
|
||||
cleanup:
|
||||
unsupported:
|
||||
mem_heap_free(heap);
|
||||
goto func_exit;
|
||||
} else {
|
||||
dict_index_copy_types(range_start, index, key->ext_key_parts);
|
||||
row_sel_convert_mysql_key_to_innobase(
|
||||
@@ -14368,14 +14388,18 @@ ha_innobase::records_in_range(
|
||||
DBUG_ASSERT(range_end->n_fields > 0);
|
||||
}
|
||||
|
||||
mariadb_set_stats temp(m_prebuilt->trx, handler_stats);
|
||||
|
||||
if (dict_index_is_spatial(index)) {
|
||||
/*Only min_key used in spatial index. */
|
||||
n_rows = rtr_estimate_n_rows_in_range(
|
||||
m_prebuilt->trx,
|
||||
index, range_start, mode1);
|
||||
} else {
|
||||
btr_pos_t tuple1(range_start, mode1, pages->first_page);
|
||||
btr_pos_t tuple2(range_end, mode2, pages->last_page);
|
||||
n_rows = btr_estimate_n_rows_in_range(index, &tuple1, &tuple2);
|
||||
n_rows = btr_estimate_n_rows_in_range(m_prebuilt->trx,
|
||||
index, &tuple1, &tuple2);
|
||||
pages->first_page= tuple1.page_id.raw();
|
||||
pages->last_page= tuple2.page_id.raw();
|
||||
}
|
||||
@@ -14399,11 +14423,7 @@ ha_innobase::records_in_range(
|
||||
n_rows = 1;
|
||||
}
|
||||
|
||||
unsupported:
|
||||
mem_heap_free(heap);
|
||||
func_exit:
|
||||
m_prebuilt->trx->op_info = "";
|
||||
DBUG_RETURN((ha_rows) n_rows);
|
||||
goto cleanup;
|
||||
}
|
||||
|
||||
/*********************************************************************//**
|
||||
@@ -14418,7 +14438,6 @@ ha_innobase::estimate_rows_upper_bound()
|
||||
const dict_index_t* index;
|
||||
ulonglong estimate;
|
||||
ulonglong local_data_file_length;
|
||||
mariadb_set_stats set_stats_temporary(handler_stats);
|
||||
DBUG_ENTER("estimate_rows_upper_bound");
|
||||
|
||||
/* We do not know if MySQL can call this function before calling
|
||||
@@ -14427,8 +14446,6 @@ ha_innobase::estimate_rows_upper_bound()
|
||||
|
||||
update_thd(ha_thd());
|
||||
|
||||
m_prebuilt->trx->op_info = "calculating upper bound for table rows";
|
||||
|
||||
index = dict_table_get_first_index(m_prebuilt->table);
|
||||
|
||||
ulint stat_n_leaf_pages = index->stat_n_leaf_pages;
|
||||
@@ -14446,8 +14463,6 @@ ha_innobase::estimate_rows_upper_bound()
|
||||
estimate = 2 * local_data_file_length
|
||||
/ dict_index_calc_min_rec_len(index);
|
||||
|
||||
m_prebuilt->trx->op_info = "";
|
||||
|
||||
/* Set num_rows less than MERGEBUFF to simulate the case where we do
|
||||
not have enough space to merge the externally sorted file blocks. */
|
||||
DBUG_EXECUTE_IF("set_num_rows_lt_MERGEBUFF",
|
||||
@@ -14836,7 +14851,7 @@ recalc:
|
||||
}
|
||||
}
|
||||
} else {
|
||||
ret = dict_stats_update_transient(ib_table);
|
||||
ret = dict_stats_update_transient(m_prebuilt->trx, ib_table);
|
||||
if (ret != DB_SUCCESS) {
|
||||
error:
|
||||
m_prebuilt->trx->op_info = "";
|
||||
@@ -15318,7 +15333,7 @@ ha_innobase::check(
|
||||
"dict_set_index_corrupted",
|
||||
if (!index->is_primary()) {
|
||||
m_prebuilt->index_usable = FALSE;
|
||||
dict_set_corrupted(index,
|
||||
dict_set_corrupted(m_prebuilt->trx, index,
|
||||
"dict_set_index_corrupted");
|
||||
});
|
||||
|
||||
@@ -15392,7 +15407,8 @@ ha_innobase::check(
|
||||
" index %s is corrupted.",
|
||||
index->name());
|
||||
is_ok = false;
|
||||
dict_set_corrupted(index, "CHECK TABLE-check index");
|
||||
dict_set_corrupted(m_prebuilt->trx, index,
|
||||
"CHECK TABLE-check index");
|
||||
}
|
||||
|
||||
|
||||
@@ -15407,7 +15423,8 @@ ha_innobase::check(
|
||||
" entries, should be " ULINTPF ".",
|
||||
index->name(), n_rows, n_rows_in_table);
|
||||
is_ok = false;
|
||||
dict_set_corrupted(index, "CHECK TABLE; Wrong count");
|
||||
dict_set_corrupted(m_prebuilt->trx, index,
|
||||
"CHECK TABLE; Wrong count");
|
||||
}
|
||||
}
|
||||
|
||||
@@ -15807,7 +15824,7 @@ ha_innobase::extra(
|
||||
goto stmt_boundary;
|
||||
case HA_EXTRA_NO_IGNORE_DUP_KEY:
|
||||
trx = check_trx_exists(thd);
|
||||
trx->duplicates &= ~TRX_DUP_IGNORE;
|
||||
trx->duplicates &= TRX_DUP_REPLACE;
|
||||
if (trx->is_bulk_insert()) {
|
||||
/* Allow a subsequent INSERT into an empty table
|
||||
if !unique_checks && !foreign_key_checks. */
|
||||
@@ -15824,7 +15841,7 @@ ha_innobase::extra(
|
||||
goto stmt_boundary;
|
||||
case HA_EXTRA_WRITE_CANNOT_REPLACE:
|
||||
trx = check_trx_exists(thd);
|
||||
trx->duplicates &= ~TRX_DUP_REPLACE;
|
||||
trx->duplicates &= TRX_DUP_IGNORE;
|
||||
if (trx->is_bulk_insert()) {
|
||||
/* Allow a subsequent INSERT into an empty table
|
||||
if !unique_checks && !foreign_key_checks. */
|
||||
@@ -15879,7 +15896,7 @@ ha_innobase::extra(
|
||||
handler::extra(HA_EXTRA_BEGIN_ALTER_COPY). */
|
||||
log_buffer_flush_to_disk();
|
||||
}
|
||||
alter_stats_rebuild(m_prebuilt->table, thd);
|
||||
alter_stats_rebuild(m_prebuilt->table, trx);
|
||||
break;
|
||||
case HA_EXTRA_ABORT_ALTER_COPY:
|
||||
if (m_prebuilt->table->skip_alter_undo) {
|
||||
@@ -16751,7 +16768,6 @@ ha_innobase::get_auto_increment(
|
||||
trx_t* trx;
|
||||
dberr_t error;
|
||||
ulonglong autoinc = 0;
|
||||
mariadb_set_stats set_stats_temporary(handler_stats);
|
||||
|
||||
/* Prepare m_prebuilt->trx in the table handle */
|
||||
update_thd(ha_thd());
|
||||
@@ -17268,9 +17284,9 @@ static int innobase_recover_rollback_by_xid(const XID *xid)
|
||||
{
|
||||
ut_ad(trx->rsegs.m_redo.undo->rseg == trx->rsegs.m_redo.rseg);
|
||||
|
||||
mtr_t mtr;
|
||||
mtr_t mtr{trx};
|
||||
mtr.start();
|
||||
trx_undo_set_state_at_prepare(trx, trx->rsegs.m_redo.undo, true, &mtr);
|
||||
trx_undo_set_state_at_prepare(trx->rsegs.m_redo.undo, true, &mtr);
|
||||
mtr.commit();
|
||||
|
||||
ut_ad(mtr.commit_lsn() > 0);
|
||||
@@ -17679,7 +17695,7 @@ static
|
||||
void
|
||||
innodb_make_page_dirty(THD*, st_mysql_sys_var*, void*, const void* save)
|
||||
{
|
||||
mtr_t mtr;
|
||||
mtr_t mtr{nullptr};
|
||||
uint space_id = *static_cast<const uint*>(save);
|
||||
srv_fil_make_page_dirty_debug= space_id;
|
||||
mysql_mutex_unlock(&LOCK_global_system_variables);
|
||||
@@ -18448,7 +18464,7 @@ now.
|
||||
@param[in] save immediate result from check function */
|
||||
static
|
||||
void
|
||||
innodb_merge_threshold_set_all_debug_update(THD*, st_mysql_sys_var*, void*,
|
||||
innodb_merge_threshold_set_all_debug_update(THD* thd, st_mysql_sys_var*, void*,
|
||||
const void* save)
|
||||
{
|
||||
innodb_merge_threshold_set_all_debug
|
||||
@@ -18672,7 +18688,7 @@ static void innodb_log_file_size_update(THD *thd, st_mysql_sys_var*,
|
||||
ut_ad(!log_sys.is_mmap());
|
||||
/* The server is almost idle. Write dummy FILE_CHECKPOINT records
|
||||
to ensure that the log resizing will complete. */
|
||||
mtr_t mtr;
|
||||
mtr_t mtr{nullptr};
|
||||
mtr.start();
|
||||
mtr.commit_files(log_sys.last_checkpoint_lsn);
|
||||
}
|
||||
@@ -21301,19 +21317,19 @@ void ins_node_t::vers_update_end(row_prebuilt_t *prebuilt, bool history_row)
Remove statistics for dropped indexes, add statistics for created indexes
and rename statistics for renamed indexes.
@param table InnoDB table that was rebuilt by ALTER TABLE
@param thd alter table thread */
void alter_stats_rebuild(dict_table_t *table, THD *thd)
@param trx user transaction */
void alter_stats_rebuild(dict_table_t *table, trx_t *trx) noexcept
{
DBUG_ENTER("alter_stats_rebuild");
if (!table->space || !table->stats_is_persistent()
|| dict_stats_persistent_storage_check(false) != SCHEMA_OK)
DBUG_VOID_RETURN;

dberr_t ret= dict_stats_update_persistent(table);
dberr_t ret= dict_stats_update_persistent(trx, table);
if (ret == DB_SUCCESS)
ret= dict_stats_save(table);
if (ret != DB_SUCCESS)
push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
push_warning_printf(trx->mysql_thd, Sql_condition::WARN_LEVEL_WARN,
ER_ALTER_INFO, "Error updating stats for table after"
" table rebuild: %s", ut_strerr(ret));
DBUG_VOID_RETURN;

@@ -623,7 +623,7 @@ public:
|
||||
@param create_fk whether to add FOREIGN KEY constraints */
|
||||
int create_table(bool create_fk = true, bool strict= true);
|
||||
|
||||
static void create_table_update_dict(dict_table_t* table, THD* thd,
|
||||
static void create_table_update_dict(dict_table_t* table, trx_t* trx,
|
||||
const HA_CREATE_INFO& info,
|
||||
const TABLE& t);
|
||||
|
||||
@@ -919,6 +919,6 @@ bool too_big_key_part_length(size_t max_field_len, const KEY& key);
|
||||
/** Adjust the persistent statistics after rebuilding ALTER TABLE.
|
||||
Remove statistics for dropped indexes, add statistics for created indexes
|
||||
and rename statistics for renamed indexes.
|
||||
@param table_name Table name in MySQL
|
||||
@param thd alter table thread */
|
||||
void alter_stats_rebuild(dict_table_t *table, THD *thd);
|
||||
@param table InnoDB table that was rebuilt by ALTER TABLE
|
||||
@param trx user transaction */
|
||||
void alter_stats_rebuild(dict_table_t *table, trx_t *trx) noexcept;
|
||||
|
||||
@@ -2122,17 +2122,18 @@ innobase_fts_check_doc_id_col(
}

/** Check whether the table is empty.
@param[in] table table to be checked
@param[in] prebuilt table to be checked
@param[in] ignore_delete_marked Ignore the delete marked
flag record
@return true if table is empty */
static bool innobase_table_is_empty(const dict_table_t *table,
bool ignore_delete_marked=true)
static bool innobase_table_is_empty(row_prebuilt_t *prebuilt,
bool ignore_delete_marked)
{
const dict_table_t *const table{prebuilt->table};
if (!table->space)
return false;
dict_index_t *clust_index= dict_table_get_first_index(table);
mtr_t mtr;
mtr_t mtr{prebuilt->trx};
btr_pcur_t pcur;
buf_block_t *block;
page_cur_t *cur;
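Note (editor's sketch): the comment above says a table counts as empty when it has no records, or only delete-marked ones when ignore_delete_marked is set. A standalone illustration of that predicate over a plain record list, not the InnoDB clustered-index cursor walk:

#include <vector>

struct record { bool delete_marked; };

bool table_is_empty(const std::vector<record> &records, bool ignore_delete_marked)
{
  for (const record &rec : records)
    // A live record always counts; a delete-marked one counts only when we
    // are not ignoring the delete-mark flag.
    if (!rec.delete_marked || !ignore_delete_marked)
      return false;
  return true;
}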
@@ -2443,7 +2444,7 @@ innodb_instant_alter_column_allowed_reason:
|
||||
for newly added column when table is not empty */
|
||||
if (ha_alter_info->error_if_not_empty
|
||||
&& m_prebuilt->table->space
|
||||
&& !innobase_table_is_empty(m_prebuilt->table)) {
|
||||
&& !innobase_table_is_empty(m_prebuilt, true)) {
|
||||
DBUG_RETURN(HA_ALTER_INPLACE_NOT_SUPPORTED);
|
||||
}
|
||||
|
||||
@@ -6177,7 +6178,7 @@ add_all_virtual:
|
||||
memset(roll_ptr, 0, sizeof roll_ptr);
|
||||
|
||||
dtuple_t* entry = index->instant_metadata(*row, ctx->heap);
|
||||
mtr_t mtr;
|
||||
mtr_t mtr{trx};
|
||||
mtr.start();
|
||||
index->set_modified(mtr);
|
||||
btr_pcur_t pcur;
|
||||
@@ -7282,7 +7283,8 @@ error_handling_drop_uncached_1:
|
||||
|| !user_table->space) {
|
||||
} else if (ib_uint64_t autoinc
|
||||
= btr_read_autoinc(clust_index)) {
|
||||
btr_write_autoinc(new_clust_index, autoinc);
|
||||
btr_write_autoinc(ctx->prebuilt->trx,
|
||||
new_clust_index, autoinc);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -8009,7 +8011,7 @@ ha_innobase::prepare_inplace_alter_table(
|
||||
/* Ignore the MDL downgrade when table is empty.
|
||||
This optimization is disabled for partition table. */
|
||||
ha_alter_info->mdl_exclusive_after_prepare =
|
||||
innobase_table_is_empty(m_prebuilt->table, false);
|
||||
innobase_table_is_empty(m_prebuilt, false);
|
||||
if (ha_alter_info->online
|
||||
&& ha_alter_info->mdl_exclusive_after_prepare) {
|
||||
ha_alter_info->online = false;
|
||||
@@ -9946,7 +9948,8 @@ commit_set_autoinc(
|
||||
/* Bulk index creation does not update
|
||||
PAGE_ROOT_AUTO_INC, so we must persist the "last used"
|
||||
value here. */
|
||||
btr_write_autoinc(dict_table_get_first_index(ctx->new_table),
|
||||
btr_write_autoinc(ctx->trx,
|
||||
dict_table_get_first_index(ctx->new_table),
|
||||
autoinc - 1, true);
|
||||
} else if ((ha_alter_info->handler_flags
|
||||
& ALTER_CHANGE_CREATE_OPTION)
|
||||
@@ -10017,7 +10020,8 @@ commit_set_autoinc(
|
||||
}
|
||||
}
|
||||
|
||||
btr_write_autoinc(dict_table_get_first_index(ctx->new_table),
|
||||
btr_write_autoinc(ctx->trx,
|
||||
dict_table_get_first_index(ctx->new_table),
|
||||
autoinc, true);
|
||||
} else if (ctx->need_rebuild()) {
|
||||
/* No AUTO_INCREMENT value was specified.
|
||||
@@ -10171,8 +10175,9 @@ innobase_update_foreign_cache(
|
||||
and prevent the table from being evicted from the data
|
||||
dictionary cache (work around the lack of WL#6049). */
|
||||
dict_names_t fk_tables;
|
||||
mtr_t mtr{ctx->trx};
|
||||
|
||||
err = dict_load_foreigns(user_table->name.m_name,
|
||||
err = dict_load_foreigns(mtr, user_table->name.m_name,
|
||||
ctx->col_names, 1, true,
|
||||
DICT_ERR_IGNORE_FK_NOKEY,
|
||||
fk_tables);
|
||||
@@ -10183,7 +10188,7 @@ innobase_update_foreign_cache(
|
||||
/* It is possible there are existing foreign key are
|
||||
loaded with "foreign_key checks" off,
|
||||
so let's retry the loading with charset_check is off */
|
||||
err = dict_load_foreigns(user_table->name.m_name,
|
||||
err = dict_load_foreigns(mtr, user_table->name.m_name,
|
||||
ctx->col_names, 1, false,
|
||||
DICT_ERR_IGNORE_NONE,
|
||||
fk_tables);
|
||||
@@ -11014,7 +11019,7 @@ commit_cache_norebuild(
|
||||
becomes durable, fsp_flags_try_adjust()
|
||||
will perform the equivalent adjustment
|
||||
and warn "adjusting FSP_SPACE_FLAGS". */
|
||||
mtr_t mtr;
|
||||
mtr_t mtr{trx};
|
||||
mtr.start();
|
||||
if (buf_block_t* b = buf_page_get(
|
||||
page_id_t(space->id, 0),
|
||||
@@ -11184,7 +11189,7 @@ Remove statistics for dropped indexes, add statistics for created indexes
|
||||
and rename statistics for renamed indexes.
|
||||
@param ha_alter_info Data used during in-place alter
|
||||
@param ctx In-place ALTER TABLE context
|
||||
@param thd alter table thread
|
||||
@param trx user transaction
|
||||
*/
|
||||
static
|
||||
void
|
||||
@@ -11192,7 +11197,7 @@ alter_stats_norebuild(
|
||||
/*==================*/
|
||||
Alter_inplace_info* ha_alter_info,
|
||||
ha_innobase_inplace_ctx* ctx,
|
||||
THD* thd)
|
||||
trx_t* trx)
|
||||
{
|
||||
DBUG_ENTER("alter_stats_norebuild");
|
||||
DBUG_ASSERT(!ctx->need_rebuild());
|
||||
@@ -11209,7 +11214,7 @@ alter_stats_norebuild(
|
||||
DBUG_ASSERT(index->table == ctx->new_table);
|
||||
|
||||
if (!(index->type & DICT_FTS)) {
|
||||
dict_stats_update_for_index(index);
|
||||
dict_stats_update_for_index(trx, index);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -11886,7 +11891,7 @@ foreign_fail:
|
||||
(*pctx);
|
||||
DBUG_ASSERT(ctx->need_rebuild());
|
||||
|
||||
alter_stats_rebuild(ctx->new_table, m_user_thd);
|
||||
alter_stats_rebuild(ctx->new_table, m_prebuilt->trx);
|
||||
}
|
||||
} else {
|
||||
for (inplace_alter_handler_ctx** pctx = ctx_array;
|
||||
@@ -11896,7 +11901,8 @@ foreign_fail:
|
||||
(*pctx);
|
||||
DBUG_ASSERT(!ctx->need_rebuild());
|
||||
|
||||
alter_stats_norebuild(ha_alter_info, ctx, m_user_thd);
|
||||
alter_stats_norebuild(ha_alter_info, ctx,
|
||||
m_prebuilt->trx);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -4535,10 +4535,6 @@ i_s_sys_tables_fill_table(
|
||||
TABLE_LIST* tables, /*!< in/out: tables to fill */
|
||||
Item* ) /*!< in: condition (not used) */
|
||||
{
|
||||
btr_pcur_t pcur;
|
||||
mtr_t mtr;
|
||||
int err = 0;
|
||||
|
||||
DBUG_ENTER("i_s_sys_tables_fill_table");
|
||||
RETURN_IF_INNODB_NOT_STARTED(tables->schema_table_name.str);
|
||||
|
||||
@@ -4547,6 +4543,10 @@ i_s_sys_tables_fill_table(
|
||||
DBUG_RETURN(0);
|
||||
}
|
||||
|
||||
btr_pcur_t pcur;
|
||||
mtr_t mtr{thd_to_trx(thd)};
|
||||
int err = 0;
|
||||
|
||||
mtr.start();
|
||||
dict_sys.lock(SRW_LOCK_CALL);
|
||||
|
||||
@@ -4764,11 +4764,6 @@ i_s_sys_tables_fill_table_stats(
|
||||
TABLE_LIST* tables, /*!< in/out: tables to fill */
|
||||
Item* ) /*!< in: condition (not used) */
|
||||
{
|
||||
btr_pcur_t pcur;
|
||||
const rec_t* rec;
|
||||
mtr_t mtr;
|
||||
int err = 0;
|
||||
|
||||
DBUG_ENTER("i_s_sys_tables_fill_table_stats");
|
||||
RETURN_IF_INNODB_NOT_STARTED(tables->schema_table_name.str);
|
||||
|
||||
@@ -4777,6 +4772,11 @@ i_s_sys_tables_fill_table_stats(
|
||||
DBUG_RETURN(0);
|
||||
}
|
||||
|
||||
btr_pcur_t pcur;
|
||||
const rec_t* rec;
|
||||
mtr_t mtr{thd_to_trx(thd)};
|
||||
int err = 0;
|
||||
|
||||
mtr.start();
|
||||
dict_sys.lock(SRW_LOCK_CALL);
|
||||
|
||||
@@ -4986,12 +4986,6 @@ i_s_sys_indexes_fill_table(
|
||||
TABLE_LIST* tables, /*!< in/out: tables to fill */
|
||||
Item* ) /*!< in: condition (not used) */
|
||||
{
|
||||
btr_pcur_t pcur;
|
||||
const rec_t* rec;
|
||||
mem_heap_t* heap;
|
||||
mtr_t mtr;
|
||||
int err = 0;
|
||||
|
||||
DBUG_ENTER("i_s_sys_indexes_fill_table");
|
||||
RETURN_IF_INNODB_NOT_STARTED(tables->schema_table_name.str);
|
||||
|
||||
@@ -5000,6 +4994,12 @@ i_s_sys_indexes_fill_table(
|
||||
DBUG_RETURN(0);
|
||||
}
|
||||
|
||||
btr_pcur_t pcur;
|
||||
const rec_t* rec;
|
||||
mem_heap_t* heap;
|
||||
mtr_t mtr{thd_to_trx(thd)};
|
||||
int err = 0;
|
||||
|
||||
heap = mem_heap_create(1000);
|
||||
dict_sys.lock(SRW_LOCK_CALL);
|
||||
mtr_start(&mtr);
|
||||
@@ -5198,13 +5198,6 @@ i_s_sys_columns_fill_table(
|
||||
TABLE_LIST* tables, /*!< in/out: tables to fill */
|
||||
Item* ) /*!< in: condition (not used) */
|
||||
{
|
||||
btr_pcur_t pcur;
|
||||
const rec_t* rec;
|
||||
const char* col_name;
|
||||
mem_heap_t* heap;
|
||||
mtr_t mtr;
|
||||
int err = 0;
|
||||
|
||||
DBUG_ENTER("i_s_sys_columns_fill_table");
|
||||
RETURN_IF_INNODB_NOT_STARTED(tables->schema_table_name.str);
|
||||
|
||||
@@ -5213,6 +5206,13 @@ i_s_sys_columns_fill_table(
|
||||
DBUG_RETURN(0);
|
||||
}
|
||||
|
||||
btr_pcur_t pcur;
|
||||
const rec_t* rec;
|
||||
const char* col_name;
|
||||
mem_heap_t* heap;
|
||||
mtr_t mtr{thd_to_trx(thd)};
|
||||
int err = 0;
|
||||
|
||||
heap = mem_heap_create(1000);
|
||||
mtr.start();
|
||||
dict_sys.lock(SRW_LOCK_CALL);
|
||||
@@ -5390,13 +5390,6 @@ i_s_sys_virtual_fill_table(
|
||||
TABLE_LIST* tables,
|
||||
Item* )
|
||||
{
|
||||
btr_pcur_t pcur;
|
||||
const rec_t* rec;
|
||||
ulint pos;
|
||||
ulint base_pos;
|
||||
mtr_t mtr;
|
||||
int err = 0;
|
||||
|
||||
DBUG_ENTER("i_s_sys_virtual_fill_table");
|
||||
RETURN_IF_INNODB_NOT_STARTED(tables->schema_table_name.str);
|
||||
|
||||
@@ -5405,6 +5398,13 @@ i_s_sys_virtual_fill_table(
|
||||
DBUG_RETURN(0);
|
||||
}
|
||||
|
||||
btr_pcur_t pcur;
|
||||
const rec_t* rec;
|
||||
ulint pos;
|
||||
ulint base_pos;
|
||||
mtr_t mtr{thd_to_trx(thd)};
|
||||
int err = 0;
|
||||
|
||||
mtr.start();
|
||||
dict_sys.lock(SRW_LOCK_CALL);
|
||||
|
||||
@@ -5572,13 +5572,6 @@ i_s_sys_fields_fill_table(
|
||||
TABLE_LIST* tables, /*!< in/out: tables to fill */
|
||||
Item* ) /*!< in: condition (not used) */
|
||||
{
|
||||
btr_pcur_t pcur;
|
||||
const rec_t* rec;
|
||||
mem_heap_t* heap;
|
||||
index_id_t last_id;
|
||||
mtr_t mtr;
|
||||
int err = 0;
|
||||
|
||||
DBUG_ENTER("i_s_sys_fields_fill_table");
|
||||
RETURN_IF_INNODB_NOT_STARTED(tables->schema_table_name.str);
|
||||
|
||||
@@ -5588,6 +5581,13 @@ i_s_sys_fields_fill_table(
|
||||
DBUG_RETURN(0);
|
||||
}
|
||||
|
||||
btr_pcur_t pcur;
|
||||
const rec_t* rec;
|
||||
mem_heap_t* heap;
|
||||
index_id_t last_id;
|
||||
mtr_t mtr{thd_to_trx(thd)};
|
||||
int err = 0;
|
||||
|
||||
heap = mem_heap_create(1000);
|
||||
mtr.start();
|
||||
|
||||
@@ -5774,12 +5774,6 @@ i_s_sys_foreign_fill_table(
|
||||
TABLE_LIST* tables, /*!< in/out: tables to fill */
|
||||
Item* ) /*!< in: condition (not used) */
|
||||
{
|
||||
btr_pcur_t pcur;
|
||||
const rec_t* rec;
|
||||
mem_heap_t* heap;
|
||||
mtr_t mtr;
|
||||
int err = 0;
|
||||
|
||||
DBUG_ENTER("i_s_sys_foreign_fill_table");
|
||||
RETURN_IF_INNODB_NOT_STARTED(tables->schema_table_name.str);
|
||||
|
||||
@@ -5788,6 +5782,12 @@ i_s_sys_foreign_fill_table(
|
||||
DBUG_RETURN(0);
|
||||
}
|
||||
|
||||
btr_pcur_t pcur;
|
||||
const rec_t* rec;
|
||||
mem_heap_t* heap;
|
||||
mtr_t mtr{thd_to_trx(thd)};
|
||||
int err = 0;
|
||||
|
||||
heap = mem_heap_create(1000);
|
||||
mtr.start();
|
||||
dict_sys.lock(SRW_LOCK_CALL);
|
||||
@@ -5962,12 +5962,6 @@ i_s_sys_foreign_cols_fill_table(
|
||||
TABLE_LIST* tables, /*!< in/out: tables to fill */
|
||||
Item* ) /*!< in: condition (not used) */
|
||||
{
|
||||
btr_pcur_t pcur;
|
||||
const rec_t* rec;
|
||||
mem_heap_t* heap;
|
||||
mtr_t mtr;
|
||||
int err = 0;
|
||||
|
||||
DBUG_ENTER("i_s_sys_foreign_cols_fill_table");
|
||||
RETURN_IF_INNODB_NOT_STARTED(tables->schema_table_name.str);
|
||||
|
||||
@@ -5977,6 +5971,12 @@ i_s_sys_foreign_cols_fill_table(
|
||||
DBUG_RETURN(0);
|
||||
}
|
||||
|
||||
btr_pcur_t pcur;
|
||||
const rec_t* rec;
|
||||
mem_heap_t* heap;
|
||||
mtr_t mtr{thd_to_trx(thd)};
|
||||
int err = 0;
|
||||
|
||||
heap = mem_heap_create(1000);
|
||||
mtr.start();
|
||||
dict_sys.lock(SRW_LOCK_CALL);
|
||||
|
||||
@@ -729,7 +729,7 @@ static dberr_t ibuf_merge(fil_space_t *space, btr_cur_t *cur, mtr_t *mtr)
|
||||
if (!block);
|
||||
else if (fil_page_get_type(block->page.frame) != FIL_PAGE_INDEX ||
|
||||
!page_is_leaf(block->page.frame) ||
|
||||
DB_SUCCESS == fseg_page_is_allocated(space, page_no))
|
||||
DB_SUCCESS == fseg_page_is_allocated(mtr, space, page_no))
|
||||
block= nullptr;
|
||||
else
|
||||
buffered= ibuf_bitmap_buffered(bitmap, block->page.id().page_no());
|
||||
@@ -919,7 +919,7 @@ ATTRIBUTE_COLD dberr_t ibuf_upgrade()
|
||||
|
||||
size_t spaces=0, pages= 0;
|
||||
dberr_t err;
|
||||
mtr_t mtr;
|
||||
mtr_t mtr{nullptr};
|
||||
mtr.start();
|
||||
mtr_x_lock_index(ibuf_index, &mtr);
|
||||
|
||||
@@ -1025,7 +1025,7 @@ ATTRIBUTE_COLD dberr_t ibuf_upgrade()
|
||||
|
||||
dberr_t ibuf_upgrade_needed()
|
||||
{
|
||||
mtr_t mtr;
|
||||
mtr_t mtr{nullptr};
|
||||
mtr.start();
|
||||
mtr.x_lock_space(fil_system.sys_space);
|
||||
dberr_t err;
|
||||
|
||||
@@ -68,12 +68,11 @@ is acceptable for the program to die with a clear assert failure. */
|
||||
|
||||
/**************************************************************//**
|
||||
Checks and adjusts the root node of a tree during IMPORT TABLESPACE.
|
||||
@return error code, or DB_SUCCESS */
|
||||
dberr_t
|
||||
btr_root_adjust_on_import(
|
||||
/*======================*/
|
||||
const dict_index_t* index) /*!< in: index tree */
|
||||
MY_ATTRIBUTE((warn_unused_result));
|
||||
@param trx transaction
|
||||
@param index index tree
|
||||
@return error code */
|
||||
dberr_t btr_root_adjust_on_import(trx_t *trx, const dict_index_t *index)
|
||||
MY_ATTRIBUTE((nonnull, warn_unused_result));
|
||||
|
||||
/** Check a file segment header within a B-tree root page.
|
||||
@param offset file segment header offset
|
||||
@@ -185,8 +184,9 @@ void btr_free_if_exists(fil_space_t *space, uint32_t page,
|
||||
index_id_t index_id, mtr_t *mtr);
|
||||
|
||||
/** Drop a temporary table
|
||||
@param trx transaction
|
||||
@param table temporary table */
|
||||
void btr_drop_temporary_table(const dict_table_t &table);
|
||||
void btr_drop_temporary_table(trx_t *trx, const dict_table_t &table);
|
||||
|
||||
/** Read the last used AUTO_INCREMENT value from PAGE_ROOT_AUTO_INC.
|
||||
@param[in,out] index clustered index
|
||||
@@ -210,13 +210,15 @@ uint64_t btr_read_autoinc_with_fallback(const dict_table_t *table,
|
||||
MY_ATTRIBUTE((nonnull, warn_unused_result));
|
||||
|
||||
/** Write the next available AUTO_INCREMENT value to PAGE_ROOT_AUTO_INC.
|
||||
@param[in,out] trx transaction
|
||||
@param[in,out] index clustered index
|
||||
@param[in] autoinc the AUTO_INCREMENT value
|
||||
@param[in] reset whether to reset the AUTO_INCREMENT
|
||||
to a possibly smaller value than currently
|
||||
exists in the page */
|
||||
void
|
||||
btr_write_autoinc(dict_index_t* index, ib_uint64_t autoinc, bool reset = false)
|
||||
btr_write_autoinc(trx_t *trx, dict_index_t *index, uint64_t autoinc,
|
||||
bool reset = false)
|
||||
MY_ATTRIBUTE((nonnull));
|
||||
|
||||
/** Write instant ALTER TABLE metadata to a root page.
|
||||
@@ -506,8 +508,8 @@ dberr_t
|
||||
btr_validate_index(
|
||||
/*===============*/
|
||||
dict_index_t* index, /*!< in: index */
|
||||
const trx_t* trx) /*!< in: transaction or 0 */
|
||||
MY_ATTRIBUTE((warn_unused_result));
|
||||
trx_t* trx) /*!< in: transaction */
|
||||
MY_ATTRIBUTE((warn_unused_result,nonnull));
|
||||
|
||||
/** Remove a page from the level list of pages.
|
||||
@param[in] block page to remove
|
||||
|
||||
@@ -51,19 +51,18 @@ class PageBulk
|
||||
public:
|
||||
/** Constructor
|
||||
@param[in] index B-tree index
|
||||
@param[in,out]	trx	transaction
|
||||
@param[in] page_no page number
|
||||
@param[in] level page level
|
||||
@param[in] trx_id transaction id */
|
||||
@param[in] level page level */
|
||||
PageBulk(
|
||||
dict_index_t* index,
|
||||
trx_id_t trx_id,
|
||||
trx_t* trx,
|
||||
uint32_t page_no,
|
||||
ulint level)
|
||||
:
|
||||
m_heap(NULL),
|
||||
m_index(index),
|
||||
m_mtr(),
|
||||
m_trx_id(trx_id),
|
||||
m_mtr(trx),
|
||||
m_block(NULL),
|
||||
m_page(NULL),
|
||||
m_page_zip(NULL),
|
||||
@@ -222,9 +221,6 @@ private:
|
||||
/** The mini-transaction */
|
||||
mtr_t m_mtr;
|
||||
|
||||
/** The transaction id */
|
||||
trx_id_t m_trx_id;
|
||||
|
||||
/** The buffer block */
|
||||
buf_block_t* m_block;
|
||||
|
||||
@@ -284,8 +280,7 @@ public:
|
||||
@param[in] index B-tree index
|
||||
@param[in] trx transaction */
|
||||
BtrBulk(
|
||||
dict_index_t* index,
|
||||
const trx_t* trx)
|
||||
dict_index_t* index, trx_t* trx)
|
||||
:
|
||||
m_index(index),
|
||||
m_trx(trx)
|
||||
@@ -359,7 +354,7 @@ private:
|
||||
dict_index_t*const m_index;
|
||||
|
||||
/** Transaction */
|
||||
const trx_t*const m_trx;
|
||||
trx_t*const m_trx;
|
||||
|
||||
/** Root page level */
|
||||
ulint m_root_level;
|
||||
|
||||
@@ -93,11 +93,12 @@ btr_cur_position(
|
||||
|
||||
/** Load the instant ALTER TABLE metadata from the clustered index
|
||||
when loading a table definition.
|
||||
@param[in,out] mtr mini-transaction
|
||||
@param[in,out] table table definition from the data dictionary
|
||||
@return error code
|
||||
@retval DB_SUCCESS if no error occurred */
|
||||
dberr_t
|
||||
btr_cur_instant_init(dict_table_t* table)
|
||||
btr_cur_instant_init(mtr_t *mtr, dict_table_t* table)
|
||||
ATTRIBUTE_COLD __attribute__((nonnull, warn_unused_result));
|
||||
|
||||
/** Initialize the n_core_null_bytes on first access to a clustered
|
||||
@@ -447,11 +448,12 @@ number of rows, otherwise count the estimated(see
|
||||
btr_estimate_n_rows_in_range_on_level() for details) number if rows, and
|
||||
fetch the right page. If leaves are reached, unlatch non-leaf pages except
|
||||
the right leaf parent. After the right leaf page is fetched, commit mtr.
|
||||
@param[in] index index
|
||||
@param[in] range_start range start
|
||||
@param[in] range_end range end
|
||||
@param trx transaction
|
||||
@param index B-tree
|
||||
@param range_start first key
|
||||
@param range_end last key
|
||||
@return estimated number of rows; */
|
||||
ha_rows btr_estimate_n_rows_in_range(dict_index_t *index,
|
||||
ha_rows btr_estimate_n_rows_in_range(trx_t *trx, dict_index_t *index,
|
||||
btr_pos_t *range_start,
|
||||
btr_pos_t *range_end);
|
||||
|
||||
|
||||
@@ -77,8 +77,10 @@ void btr_search_drop_page_hash_index(buf_block_t *block,
|
||||
|
||||
/** Drop possible adaptive hash index entries when a page is evicted
|
||||
from the buffer pool or freed in a file, or the index is being dropped.
|
||||
@param page_id page identifier of the being-dropped page */
|
||||
void btr_search_drop_page_hash_when_freed(const page_id_t page_id) noexcept;
|
||||
@param mtr mini-transaction
|
||||
@param page_id page identifier of the being-dropped page */
|
||||
void btr_search_drop_page_hash_when_freed(mtr_t *mtr, const page_id_t page_id)
|
||||
noexcept;
|
||||
|
||||
/** Update the page hash index after a single record is inserted on a page.
|
||||
@param cursor cursor which was positioned before the inserted record
|
||||
|
||||
@@ -41,6 +41,8 @@ Created 11/5/1995 Heikki Tuuri
|
||||
#include "transactional_lock_guard.h"
|
||||
#include <ostream>
|
||||
|
||||
struct trx_t;
|
||||
|
||||
/** The allocation granularity of innodb_buffer_pool_size */
|
||||
constexpr size_t innodb_buffer_pool_extent_size=
|
||||
sizeof(size_t) < 8 ? 2 << 20 : 8 << 20;
|
||||
@@ -259,6 +261,12 @@ void
|
||||
buf_block_modify_clock_inc(
|
||||
/*=======================*/
|
||||
buf_block_t* block); /*!< in: block */
|
||||
|
||||
/** Increment the pages_accessed count. */
|
||||
void buf_inc_get(trx_t *trx) noexcept;
|
||||
|
||||
/** Increment the pages_accessed count. */
|
||||
void buf_inc_get() noexcept;
|
||||
#endif /* !UNIV_INNOCHECKSUM */
|
||||
|
||||
/** Check if a buffer is all zeroes.
|
||||
@@ -1023,12 +1031,16 @@ struct buf_pool_stat_t{
|
||||
/** Initialize the counters */
|
||||
void init() noexcept { memset((void*) this, 0, sizeof *this); }
|
||||
|
||||
ib_counter_t<ulint, ib_counter_element_t> n_page_gets;
|
||||
/*!< number of page gets performed;
|
||||
also successful searches through
|
||||
the adaptive hash index are
|
||||
counted as page gets;
|
||||
NOT protected by buf_pool.mutex */
|
||||
buf_pool_stat_t& operator=(const buf_pool_stat_t& other) noexcept {
|
||||
memcpy(reinterpret_cast<void*>(this), &other, sizeof *this);
|
||||
return *this;
|
||||
}
|
||||
|
||||
/** number of pages accessed; aggregates trx_t::pages_accessed */
|
||||
union {
|
||||
Atomic_counter<ulint> n_page_gets{0};
|
||||
ulint n_page_gets_nonatomic;
|
||||
};
|
||||
ulint n_pages_read; /*!< number read operations */
|
||||
ulint n_pages_written;/*!< number write operations */
|
||||
ulint n_pages_created;/*!< number of pages created
|
||||
@@ -1161,7 +1173,7 @@ public:
|
||||
|
||||
/** Resize the buffer pool.
|
||||
@param size requested innodb_buffer_pool_size in bytes
|
||||
@param thd current connection */
|
||||
@param trx current connection */
|
||||
ATTRIBUTE_COLD void resize(size_t size, THD *thd) noexcept;
|
||||
|
||||
/** Collect garbage (release pages from the LRU list) */
|
||||
@@ -1272,15 +1284,16 @@ public:
|
||||
the mode c=FIX_WAIT_READ must not be used.
|
||||
@param id page identifier
|
||||
@param err error code (will only be assigned when returning nullptr)
|
||||
@param trx transaction attached to current connection
|
||||
@param c how to handle conflicts
|
||||
@return undo log page, buffer-fixed
|
||||
@retval -1 if c=FIX_NOWAIT and buffer-fixing would require waiting
|
||||
@retval nullptr if the undo page was corrupted or freed */
|
||||
buf_block_t *page_fix(const page_id_t id, dberr_t *err,
|
||||
buf_block_t *page_fix(const page_id_t id, dberr_t *err, trx_t *trx,
|
||||
page_fix_conflicts c) noexcept;
|
||||
|
||||
buf_block_t *page_fix(const page_id_t id) noexcept
|
||||
{ return page_fix(id, nullptr, FIX_WAIT_READ); }
|
||||
buf_block_t *page_fix(const page_id_t id, trx_t *trx) noexcept
|
||||
{ return page_fix(id, nullptr, trx, FIX_WAIT_READ); }
|
||||
|
||||
/** Validate a block descriptor.
|
||||
@param b block descriptor that may be invalid after shrink()
|
||||
|
||||
@@ -38,6 +38,7 @@ Returns a new table, index, or space id. */
|
||||
void
|
||||
dict_hdr_get_new_id(
|
||||
/*================*/
|
||||
trx_t* trx, /*!< in/out: transaction */
|
||||
table_id_t* table_id, /*!< out: table id
|
||||
(not assigned if NULL) */
|
||||
index_id_t* index_id, /*!< out: index id
|
||||
|
||||
@@ -88,15 +88,6 @@ dict_build_index_def(
|
||||
dict_index_t* index, /*!< in/out: index */
|
||||
trx_t* trx); /*!< in/out: InnoDB transaction
|
||||
handle */
|
||||
/***************************************************************//**
|
||||
Creates an index tree for the index if it is not a member of a cluster.
|
||||
Don't update SYSTEM TABLES.
|
||||
@return DB_SUCCESS or DB_OUT_OF_FILE_SPACE */
|
||||
dberr_t
|
||||
dict_create_index_tree(
|
||||
/*===================*/
|
||||
dict_index_t* index, /*!< in/out: index */
|
||||
const trx_t* trx); /*!< in: InnoDB transaction handle */
|
||||
|
||||
/** Drop the index tree associated with a row in SYS_INDEXES table.
|
||||
@param[in,out] pcur persistent cursor on rec
|
||||
@@ -115,7 +106,7 @@ dberr_t
|
||||
dict_create_index_tree_in_mem(
|
||||
/*==========================*/
|
||||
dict_index_t* index, /*!< in/out: index */
|
||||
const trx_t* trx); /*!< in: InnoDB transaction handle */
|
||||
trx_t* trx); /*!< in: InnoDB transaction handle */
|
||||
|
||||
/********************************************************************//**
|
||||
Generate a foreign key constraint name when it was not named by the user.
|
||||
|
||||
@@ -1535,16 +1535,19 @@ dict_fs2utf8(
|
||||
|
||||
/** Flag an index corrupted both in the data dictionary cache
|
||||
and in the system table SYS_INDEXES.
|
||||
@param trx transaction
|
||||
@param index index to be flagged as corrupted
|
||||
@param ctx context (for error log reporting) */
|
||||
void dict_set_corrupted(dict_index_t *index, const char *ctx)
|
||||
void dict_set_corrupted(trx_t *trx, dict_index_t *index, const char *ctx)
|
||||
ATTRIBUTE_COLD __attribute__((nonnull));
|
||||
|
||||
/** Sets merge_threshold in the SYS_INDEXES
|
||||
@param[in] thd current_thd
|
||||
@param[in,out] index index
|
||||
@param[in] merge_threshold value to set */
|
||||
void
|
||||
dict_index_set_merge_threshold(
|
||||
const THD& thd,
|
||||
dict_index_t* index,
|
||||
ulint merge_threshold);
|
||||
|
||||
|
||||
@@ -61,14 +61,19 @@ dict_load_table_on_id(
|
||||
table_id_t table_id, /*!< in: table id */
|
||||
dict_err_ignore_t ignore_err); /*!< in: errors to ignore
|
||||
when loading the table */
|
||||
/********************************************************************//**
|
||||
This function is called when the database is booted.
|
||||
Loads system table index definitions except for the clustered index which
|
||||
is added to the dictionary cache at booting before calling this function. */
|
||||
void
|
||||
dict_load_sys_table(
|
||||
/*================*/
|
||||
dict_table_t* table); /*!< in: system table */
|
||||
/** Load definitions for table indexes. Adds them to the data dictionary cache.
|
||||
@param mtr mini-transaction
|
||||
@param table table definition
|
||||
@param uncommitted false=READ COMMITTED, true=READ UNCOMMITTED
|
||||
@param heap memory heap for temporary storage
|
||||
@param ignore_err errors to be ignored when loading the index definition
|
||||
@return error code
|
||||
@retval DB_SUCCESS if all indexes were successfully loaded
|
||||
@retval DB_CORRUPTION if corruption of dictionary table
|
||||
@retval DB_UNSUPPORTED if table has unknown index type */
|
||||
MY_ATTRIBUTE((nonnull))
|
||||
dberr_t dict_load_indexes(mtr_t *mtr, dict_table_t *table, bool uncommitted,
|
||||
mem_heap_t *heap, dict_err_ignore_t ignore_err);
|
||||
/***********************************************************************//**
|
||||
Loads foreign key constraints where the table is either the foreign key
|
||||
holder or where the table is referenced by a foreign key. Adds these
|
||||
@@ -82,6 +87,7 @@ cache, then it is added to the output parameter (fk_tables).
|
||||
dberr_t
|
||||
dict_load_foreigns(
|
||||
/*===============*/
|
||||
mtr_t& mtr, /*!< in/out: mini-transaction*/
|
||||
const char* table_name, /*!< in: table name */
|
||||
const char** col_names, /*!< in: column names, or NULL
|
||||
to use table->col_names */
|
||||
@@ -96,7 +102,7 @@ dict_load_foreigns(
|
||||
which must be loaded
|
||||
subsequently to load all the
|
||||
foreign key constraints. */
|
||||
MY_ATTRIBUTE((nonnull(1)));
|
||||
MY_ATTRIBUTE((nonnull(2)));
|
||||
|
||||
/********************************************************************//**
|
||||
This function opens a system table, and return the first record.
|
||||
|
||||
@@ -1317,11 +1317,12 @@ public:
|
||||
vers_history_row(const rec_t* rec, const rec_offs* offsets);
|
||||
|
||||
/** Check if record in secondary index is historical row.
|
||||
@param[in,out] mtr mini-transaction
|
||||
@param[in] rec record in a secondary index
|
||||
@param[out] history_row true if row is historical
|
||||
@return true on error */
|
||||
bool
|
||||
vers_history_row(const rec_t* rec, bool &history_row);
|
||||
vers_history_row(mtr_t *mtr, const rec_t* rec, bool &history_row);
|
||||
|
||||
/** Assign the number of new column to be added as a part
|
||||
of the index
|
||||
|
||||
@@ -30,21 +30,12 @@ Created Jan 06, 2010 Vasil Dimov
|
||||
#include "dict0types.h"
|
||||
#include "trx0types.h"
|
||||
|
||||
#ifdef WITH_WSREP
|
||||
/** Update the table modification counter and if necessary,
|
||||
schedule new estimates for table and index statistics to be calculated.
|
||||
@param[in,out] table persistent or temporary table
|
||||
@param[in] trx transaction */
|
||||
void dict_stats_update_if_needed(dict_table_t *table, const trx_t &trx)
|
||||
@param[in,out] trx transaction */
|
||||
void dict_stats_update_if_needed(dict_table_t *table, trx_t &trx) noexcept
|
||||
MY_ATTRIBUTE((nonnull));
|
||||
#else
|
||||
/** Update the table modification counter and if necessary,
|
||||
schedule new estimates for table and index statistics to be calculated.
|
||||
@param[in,out] table persistent or temporary table */
|
||||
void dict_stats_update_if_needed_func(dict_table_t *table)
|
||||
MY_ATTRIBUTE((nonnull));
|
||||
# define dict_stats_update_if_needed(t,trx) dict_stats_update_if_needed_func(t)
|
||||
#endif
|
||||
|
||||
/** Execute DELETE FROM mysql.innodb_table_stats
|
||||
@param database_name database name
|
||||
@@ -76,11 +67,8 @@ dberr_t dict_stats_delete_from_index_stats(const char *database_name,
|
||||
|
||||
/*********************************************************************//**
|
||||
Fetches or calculates new estimates for index statistics. */
|
||||
void
|
||||
dict_stats_update_for_index(
|
||||
/*========================*/
|
||||
dict_index_t* index) /*!< in/out: index */
|
||||
MY_ATTRIBUTE((nonnull));
|
||||
void dict_stats_update_for_index(trx_t *trx, dict_index_t *index) noexcept
|
||||
MY_ATTRIBUTE((nonnull));
|
||||
|
||||
enum dict_stats_schema_check {
|
||||
/** The InnoDB persistent statistics tables do not exist. */
|
||||
@@ -107,24 +95,27 @@ dberr_t dict_stats_fetch_from_ps(dict_table_t *table);
|
||||
/**
|
||||
Calculate new estimates for table and index statistics. This function
|
||||
is relatively quick and is used to calculate non-persistent statistics.
|
||||
@param trx transaction
|
||||
@param table table for which the non-persistent statistics are being updated
|
||||
@return error code
|
||||
@retval DB_SUCCESS_LOCKED_REC if the table is under bulk insert operation */
|
||||
dberr_t dict_stats_update_transient(dict_table_t *table) noexcept;
|
||||
dberr_t dict_stats_update_transient(trx_t *trx, dict_table_t *table) noexcept;
|
||||
|
||||
/**
|
||||
Calculate new estimates for table and index statistics. This function
|
||||
is slower than dict_stats_update_transient().
|
||||
@param trx transaction
|
||||
@param table table for which the persistent statistics are being updated
|
||||
@return DB_SUCCESS or error code
|
||||
@retval DB_SUCCESS_LOCKED_REC if the table under bulk insert operation */
|
||||
dberr_t dict_stats_update_persistent(dict_table_t *table) noexcept;
|
||||
dberr_t dict_stats_update_persistent(trx_t *trx, dict_table_t *table) noexcept;
|
||||
|
||||
/**
|
||||
Try to calculate and save new estimates for persistent statistics.
|
||||
If persistent statistics are not enabled for the table or not available,
|
||||
this does nothing. */
|
||||
dberr_t dict_stats_update_persistent_try(dict_table_t *table);
|
||||
dberr_t dict_stats_update_persistent_try(trx_t *trx, dict_table_t *table)
|
||||
noexcept;
|
||||
|
||||
/** Rename a table in InnoDB persistent stats storage.
|
||||
@param old_name old table name
|
||||
|
||||
@@ -452,12 +452,14 @@ fseg_free_page(
|
||||
MY_ATTRIBUTE((nonnull, warn_unused_result));
|
||||
|
||||
/** Determine whether a page is allocated.
|
||||
@param mtr mini-transaction
|
||||
@param space tablespace
|
||||
@param page page number
|
||||
@return error code
|
||||
@retval DB_SUCCESS if the page is marked as free
|
||||
@retval DB_SUCCESS_LOCKED_REC if the page is marked as allocated */
|
||||
dberr_t fseg_page_is_allocated(fil_space_t *space, unsigned page)
|
||||
dberr_t fseg_page_is_allocated(mtr_t *mtr, fil_space_t *space, unsigned page)
|
||||
noexcept
|
||||
MY_ATTRIBUTE((nonnull, warn_unused_result));
|
||||
|
||||
MY_ATTRIBUTE((nonnull, warn_unused_result))
|
||||
|
||||
@@ -530,12 +530,14 @@ rtr_info_reinit_in_cursor(
|
||||
needed */
|
||||
|
||||
/** Estimates the number of rows in a given area.
|
||||
@param[in,out] trx transaction
|
||||
@param[in] index index
|
||||
@param[in] tuple range tuple containing mbr, may also be empty tuple
|
||||
@param[in] mode search mode
|
||||
@return estimated number of rows */
|
||||
ha_rows
|
||||
rtr_estimate_n_rows_in_range(
|
||||
trx_t* trx,
|
||||
dict_index_t* index,
|
||||
const dtuple_t* tuple,
|
||||
page_cur_mode_t mode);
|
||||
|
||||
@@ -173,6 +173,10 @@ but can be used for comparison.
|
||||
*/
|
||||
extern "C" unsigned long long thd_start_utime(const MYSQL_THD thd);
|
||||
|
||||
/** Obtain the InnoDB transaction of a MariaDB thread handle.
|
||||
@param thd current_thd
|
||||
@return InnoDB transaction */
|
||||
trx_t *thd_to_trx(const THD *thd) noexcept;
|
||||
|
||||
/** Determines the current SQL statement.
|
||||
Thread unsafe, can only be called from the thread owning the THD.
|
||||
|
||||
@@ -1,135 +0,0 @@
|
||||
/*****************************************************************************
|
||||
|
||||
Copyright (c) 2023, MariaDB Foundation
|
||||
|
||||
This program is free software; you can redistribute it and/or modify it under
|
||||
the terms of the GNU General Public License as published by the Free Software
|
||||
Foundation; version 2 of the License.
|
||||
|
||||
This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
|
||||
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License along with
|
||||
this program; if not, write to the Free Software Foundation, Inc.,
|
||||
51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA
|
||||
|
||||
*****************************************************************************/
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "ha_handler_stats.h"
|
||||
#include "my_rdtsc.h"
|
||||
|
||||
/* We do not want a dynamic initialization function to be
|
||||
conditionally invoked on each access to a C++11 extern thread_local. */
|
||||
#if __cplusplus >= 202002L
|
||||
# define simple_thread_local constinit thread_local
|
||||
#else
|
||||
# define simple_thread_local IF_WIN(__declspec(thread),__thread)
|
||||
#endif
|
||||
|
||||
/** Pointer to handler::active_handler_stats or nullptr (via .tbss) */
|
||||
extern simple_thread_local ha_handler_stats *mariadb_stats;
|
||||
|
||||
/*
|
||||
Returns nonzero if MariaDB wants engine status
|
||||
*/
|
||||
|
||||
inline uint mariadb_stats_active()
|
||||
{
|
||||
if (ha_handler_stats *stats= mariadb_stats)
|
||||
return stats->active;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* The following functions increment different engine status */
|
||||
|
||||
inline void mariadb_increment_pages_accessed(ha_handler_stats *stats)
|
||||
{
|
||||
if (stats)
|
||||
stats->pages_accessed++;
|
||||
}
|
||||
|
||||
inline void mariadb_increment_pages_accessed()
|
||||
{
|
||||
mariadb_increment_pages_accessed(mariadb_stats);
|
||||
}
|
||||
|
||||
inline void mariadb_increment_pages_updated(ulonglong count)
|
||||
{
|
||||
if (ha_handler_stats *stats= mariadb_stats)
|
||||
stats->pages_updated+= count;
|
||||
}
|
||||
|
||||
inline void mariadb_increment_pages_read(ha_handler_stats *stats)
|
||||
{
|
||||
if (stats)
|
||||
stats->pages_read_count++;
|
||||
}
|
||||
|
||||
inline void mariadb_increment_pages_read()
|
||||
{
|
||||
mariadb_increment_pages_read(mariadb_stats);
|
||||
}
|
||||
|
||||
inline void mariadb_increment_undo_records_read()
|
||||
{
|
||||
if (ha_handler_stats *stats= mariadb_stats)
|
||||
stats->undo_records_read++;
|
||||
}
|
||||
|
||||
inline void mariadb_increment_pages_prefetched(ulint n_pages)
|
||||
{
|
||||
if (ha_handler_stats *stats= mariadb_stats)
|
||||
stats->pages_prefetched += n_pages;
|
||||
}
|
||||
|
||||
/*
|
||||
The following has to be identical code as measure() in sql_analyze_stmt.h
|
||||
|
||||
One should only call this if mariadb_stats_active() is true.
|
||||
*/
|
||||
|
||||
inline ulonglong mariadb_measure()
|
||||
{
|
||||
#if (MY_TIMER_ROUTINE_CYCLES)
|
||||
return my_timer_cycles();
|
||||
#else
|
||||
return my_timer_microseconds();
|
||||
#endif
|
||||
}
|
||||
|
||||
/*
|
||||
Call this only if start_time != 0
|
||||
See buf0rea.cc for an example of how to use it efficiently
|
||||
*/
|
||||
|
||||
inline void mariadb_increment_pages_read_time(ulonglong start_time)
|
||||
{
|
||||
ha_handler_stats *stats= mariadb_stats;
|
||||
ulonglong end_time= mariadb_measure();
|
||||
/* Check that we only call this if active, see example! */
|
||||
DBUG_ASSERT(start_time);
|
||||
DBUG_ASSERT(stats->active);
|
||||
|
||||
stats->pages_read_time+= (end_time - start_time);
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
Helper class to set mariadb_stats temporarily for one call in handler.cc
|
||||
*/
|
||||
|
||||
class mariadb_set_stats
|
||||
{
|
||||
public:
|
||||
mariadb_set_stats(ha_handler_stats *stats)
|
||||
{
|
||||
mariadb_stats= stats;
|
||||
}
|
||||
~mariadb_set_stats()
|
||||
{
|
||||
mariadb_stats= nullptr;
|
||||
}
|
||||
};
|
||||
@@ -65,7 +65,7 @@ struct mtr_memo_slot_t
|
||||
|
||||
/** Mini-transaction handle and buffer */
|
||||
struct mtr_t {
|
||||
mtr_t();
|
||||
mtr_t(trx_t *trx/*= nullptr*/);
|
||||
~mtr_t();
|
||||
|
||||
/** Start a mini-transaction. */
|
||||
@@ -786,6 +786,12 @@ private:
|
||||
|
||||
/** CRC-32C of m_log */
|
||||
uint32_t m_crc;
|
||||
public:
|
||||
/** dummy or real transaction associated with the mini-transaction */
|
||||
trx_t *const trx;
|
||||
private:
|
||||
/** user tablespace that is being modified by the mini-transaction */
|
||||
fil_space_t *m_user_space;
|
||||
|
||||
#ifdef UNIV_DEBUG
|
||||
/** Persistent user tablespace associated with the
|
||||
@@ -799,9 +805,6 @@ private:
|
||||
/** mini-transaction log */
|
||||
mtr_buf_t m_log;
|
||||
|
||||
/** user tablespace that is being modified by the mini-transaction */
|
||||
fil_space_t* m_user_space;
|
||||
|
||||
/** LSN at commit time */
|
||||
lsn_t m_commit_lsn;
|
||||
|
||||
|
||||
@@ -83,15 +83,6 @@ inline void row_log_abort_sec(dict_index_t *index)
|
||||
index->online_log= nullptr;
|
||||
}
|
||||
|
||||
/** Logs an operation to a secondary index that is (or was) being created.
|
||||
@param index index, S or X latched
|
||||
@param tuple index tuple
|
||||
@param trx_id transaction ID for insert, or 0 for delete
|
||||
@retval false if row_log_apply() failure happens
|
||||
or true otherwise */
|
||||
bool row_log_online_op(dict_index_t *index, const dtuple_t *tuple,
|
||||
trx_id_t trx_id) ATTRIBUTE_COLD;
|
||||
|
||||
/******************************************************//**
|
||||
Gets the error status of the online index rebuild log.
|
||||
@return DB_SUCCESS or error code */
|
||||
@@ -201,7 +192,7 @@ row_log_get_max_trx(
|
||||
MY_ATTRIBUTE((nonnull, warn_unused_result));
|
||||
|
||||
/** Apply the row log to the index upon completing index creation.
|
||||
@param[in] trx transaction (for checking if the operation was
|
||||
@param[in,out] trx transaction (for checking if the operation was
|
||||
interrupted)
|
||||
@param[in,out] index secondary index
|
||||
@param[in,out] table MySQL table (for reporting duplicates)
|
||||
@@ -211,7 +202,7 @@ stage->inc() will be called for each block of log that is applied.
|
||||
@return DB_SUCCESS, or error code on failure */
|
||||
dberr_t
|
||||
row_log_apply(
|
||||
const trx_t* trx,
|
||||
trx_t* trx,
|
||||
dict_index_t* index,
|
||||
struct TABLE* table,
|
||||
ut_stage_alter_t* stage)
|
||||
|
||||
@@ -129,6 +129,7 @@ struct index_def_t {
|
||||
/** Structure for reporting duplicate records. */
|
||||
struct row_merge_dup_t {
|
||||
dict_index_t* index; /*!< index being sorted */
|
||||
trx_t* trx; /*!< transaction */
|
||||
struct TABLE* table; /*!< MySQL table object */
|
||||
const ulint* col_map;/*!< mapping of column numbers
|
||||
in table to the rebuilt table
|
||||
|
||||
@@ -83,6 +83,8 @@ struct purge_node_t
|
||||
dtuple_t *row;
|
||||
/** nullptr, or the next index of table whose record should be handled */
|
||||
dict_index_t *index;
|
||||
/** dummy transaction associated with current_thd */
|
||||
trx_t *trx;
|
||||
/** memory heap used as auxiliary storage; must be emptied between rows */
|
||||
mem_heap_t *heap;
|
||||
/** persistent cursor to the clustered index record */
|
||||
|
||||
@@ -134,7 +134,6 @@ which should be seen by a semi-consistent read. */
|
||||
void
|
||||
row_vers_build_for_semi_consistent_read(
|
||||
/*====================================*/
|
||||
trx_t* caller_trx,/*!<in/out: trx of current thread */
|
||||
const rec_t* rec, /*!< in: record in a clustered index; the
|
||||
caller must have a latch on the page; this
|
||||
latch locks the top of the stack of versions
|
||||
|
||||
@@ -583,8 +583,6 @@ struct export_var_t{
|
||||
#ifdef UNIV_DEBUG
|
||||
ulint innodb_buffer_pool_pages_latched; /*!< Latched pages */
|
||||
#endif /* UNIV_DEBUG */
|
||||
/** buf_pool.stat.n_page_gets (a sharded counter) */
|
||||
ulint innodb_buffer_pool_read_requests;
|
||||
ulint innodb_checkpoint_age;
|
||||
ulint innodb_checkpoint_max_age;
|
||||
ulint innodb_data_pending_reads; /*!< Pending reads */
|
||||
|
||||
@@ -50,10 +50,11 @@ void trx_purge_truncate_history();
|
||||
|
||||
/**
|
||||
Run a purge batch.
|
||||
@param trx dummy transaction associated with the purge coordinator
|
||||
@param n_tasks number of purge tasks to submit to the queue
|
||||
@param history_size trx_sys.history_size()
|
||||
@return number of undo log pages handled in the batch */
|
||||
ulint trx_purge(ulint n_tasks, ulint history_size);
|
||||
ulint trx_purge(trx_t *trx, ulint n_tasks, ulint history_size) noexcept;
|
||||
|
||||
/** The control structure used in the purge operation */
|
||||
class purge_sys_t
|
||||
@@ -185,9 +186,10 @@ public:
|
||||
|
||||
/** Look up an undo log page.
|
||||
@param id undo page identifier
|
||||
@param trx transaction attached to current_thd
|
||||
@return undo page
|
||||
@retval nullptr in case the page is corrupted */
|
||||
buf_block_t *get_page(page_id_t id);
|
||||
buf_block_t *get_page(page_id_t id, trx_t *trx);
|
||||
|
||||
que_t* query; /*!< The query graph which will do the
|
||||
parallelized purge operation */
|
||||
@@ -342,29 +344,34 @@ public:
|
||||
private:
|
||||
/**
|
||||
Get the next record to purge and update the info in the purge system.
|
||||
@param trx transaction attached to current_thd
|
||||
@param roll_ptr undo log pointer to the record
|
||||
@return buffer-fixed reference to undo log record
|
||||
@retval {nullptr,1} if the whole undo log can be skipped in purge
|
||||
@retval {nullptr,0} if nothing is left, or on corruption */
|
||||
inline trx_purge_rec_t get_next_rec(roll_ptr_t roll_ptr);
|
||||
inline trx_purge_rec_t get_next_rec(trx_t *trx, roll_ptr_t roll_ptr)
|
||||
noexcept;
|
||||
|
||||
/** Choose the next undo log to purge.
|
||||
@param trx transaction attached to current_thd
|
||||
@return whether anything is to be purged */
|
||||
bool choose_next_log();
|
||||
bool choose_next_log(trx_t *trx) noexcept;
|
||||
|
||||
/** Update the last not yet purged history log info in rseg when
|
||||
we have purged a whole undo log. Advances also purge_trx_no
|
||||
past the purged log.
|
||||
@param trx transaction attached to current_thd
|
||||
@return whether anything is to be purged */
|
||||
bool rseg_get_next_history_log();
|
||||
bool rseg_get_next_history_log(trx_t *trx) noexcept;
|
||||
|
||||
public:
|
||||
/**
|
||||
Fetch the next undo log record from the history list to purge.
|
||||
@param trx transaction attached to current_thd
|
||||
@return buffer-fixed reference to undo log record
|
||||
@retval {nullptr,1} if the whole undo log can be skipped in purge
|
||||
@retval {nullptr,0} if nothing is left, or on corruption */
|
||||
inline trx_purge_rec_t fetch_next_rec();
|
||||
inline trx_purge_rec_t fetch_next_rec(trx_t *trx) noexcept;
|
||||
|
||||
/** Determine if the history of a transaction is purgeable.
|
||||
@param trx_id transaction identifier
|
||||
@@ -445,9 +452,10 @@ public:
|
||||
inline ~view_guard();
|
||||
/** Fetch an undo log page.
|
||||
@param id page identifier
|
||||
@param trx transaction attached to current_thd
|
||||
@param mtr mini-transaction
|
||||
@return reference to buffer page, possibly buffer-fixed in mtr */
|
||||
inline const buf_block_t *get(const page_id_t id, mtr_t *mtr);
|
||||
inline const buf_block_t *get(const page_id_t id, trx_t *trx, mtr_t *mtr);
|
||||
|
||||
/** @return purge_sys.view or purge_sys.end_view */
|
||||
inline const ReadViewBase &view() const;
|
||||
|
||||
@@ -43,6 +43,7 @@ Created 3/26/1996 Heikki Tuuri
|
||||
// Forward declaration
|
||||
struct mtr_t;
|
||||
struct rw_trx_hash_element_t;
|
||||
class ha_handler_stats;
|
||||
|
||||
/******************************************************************//**
|
||||
Set detailed error message for the transaction. */
|
||||
@@ -836,7 +837,7 @@ public:
|
||||
defer flush of the logs to disk
|
||||
until after we release the
|
||||
mutex. */
|
||||
ulint duplicates; /*!< TRX_DUP_IGNORE | TRX_DUP_REPLACE */
|
||||
byte duplicates; /*!< TRX_DUP_IGNORE | TRX_DUP_REPLACE */
|
||||
/** whether this modifies InnoDB dictionary tables */
|
||||
bool dict_operation;
|
||||
#ifdef UNIV_DEBUG
|
||||
@@ -857,6 +858,11 @@ public:
|
||||
THD* mysql_thd; /*!< MySQL thread handle corresponding
|
||||
to this trx, or NULL */
|
||||
|
||||
/** EXPLAIN ANALYZE statistics, or nullptr if not active */
|
||||
ha_handler_stats *active_handler_stats;
|
||||
/** number of pages accessed in the buffer pool */
|
||||
size_t pages_accessed;
|
||||
|
||||
const char* mysql_log_file_name;
|
||||
/*!< if MySQL binlog is used, this field
|
||||
contains a pointer to the latest file
|
||||
@@ -1015,16 +1021,13 @@ private:
|
||||
/** Process tables that were modified by the committing transaction. */
|
||||
inline void commit_tables();
|
||||
/** Mark a transaction committed in the main memory data structures.
|
||||
@param mtr mini-transaction (if there are any persistent modifications) */
|
||||
inline void commit_in_memory(const mtr_t *mtr);
|
||||
/** Write log for committing the transaction. */
|
||||
@param mtr mini-transaction */
|
||||
inline void commit_in_memory(mtr_t *mtr);
|
||||
/** Commit the transaction in the file system. */
|
||||
void commit_persist() noexcept;
|
||||
/** Clean up the transaction after commit_in_memory()
|
||||
@return false (always) */
|
||||
@retval false (always) */
|
||||
bool commit_cleanup() noexcept;
|
||||
/** Commit the transaction in a mini-transaction.
|
||||
@param mtr mini-transaction (if there are any persistent modifications) */
|
||||
void commit_low(mtr_t *mtr= nullptr);
|
||||
/** Commit an empty transaction.
|
||||
@param mtr mini-transaction */
|
||||
void commit_empty(mtr_t *mtr);
|
||||
@@ -1035,8 +1038,9 @@ private:
|
||||
@param mtr mini-transaction */
|
||||
inline void write_serialisation_history(mtr_t *mtr);
|
||||
public:
|
||||
/** Commit the transaction. */
|
||||
void commit() noexcept;
|
||||
/** Commit the transaction.
|
||||
@retval false (always) */
|
||||
bool commit() noexcept;
|
||||
|
||||
/** Try to drop a persistent table.
|
||||
@param table persistent table
|
||||
|
||||
@@ -153,7 +153,7 @@ dberr_t trx_undo_free_last_page(trx_undo_t *undo, mtr_t *mtr)
|
||||
/** Try to truncate the undo logs.
|
||||
@param trx transaction
|
||||
@return error code */
|
||||
dberr_t trx_undo_try_truncate(const trx_t &trx);
|
||||
dberr_t trx_undo_try_truncate(trx_t *trx);
|
||||
|
||||
/** Truncate the head of an undo log.
|
||||
NOTE that only whole pages are freed; the header page is not
|
||||
@@ -172,24 +172,20 @@ trx_undo_truncate_start(
|
||||
undo_no_t limit)
|
||||
MY_ATTRIBUTE((nonnull, warn_unused_result));
|
||||
/** Mark that an undo log header belongs to a data dictionary transaction.
|
||||
@param[in] trx dictionary transaction
|
||||
@param[in,out] undo undo log
|
||||
@param[in,out] mtr mini-transaction */
|
||||
void trx_undo_mark_as_dict(const trx_t* trx, trx_undo_t* undo, mtr_t* mtr);
|
||||
void trx_undo_mark_as_dict(trx_undo_t* undo, mtr_t* mtr);
|
||||
/** Assign an undo log for a persistent transaction.
|
||||
A new undo log is created or a cached undo log reused.
|
||||
@param[in,out] trx transaction
|
||||
@param[out] err error code
|
||||
@param[in,out] mtr mini-transaction
|
||||
@param[out] err error code
|
||||
@return the undo log block
|
||||
@retval NULL on error */
|
||||
buf_block_t*
|
||||
trx_undo_assign(trx_t* trx, dberr_t* err, mtr_t* mtr)
|
||||
buf_block_t *trx_undo_assign(mtr_t* mtr, dberr_t *err) noexcept
|
||||
MY_ATTRIBUTE((nonnull));
|
||||
/** Assign an undo log for a transaction.
|
||||
A new undo log is created or a cached undo log reused.
|
||||
@tparam is_temp whether this is temporary undo log
|
||||
@param[in,out] trx transaction
|
||||
@param[in] rseg rollback segment
|
||||
@param[out] undo the undo log
|
||||
@param[in,out] mtr mini-transaction
|
||||
@@ -198,18 +194,16 @@ A new undo log is created or a cached undo log reused.
|
||||
@retval nullptr on error */
|
||||
template<bool is_temp>
|
||||
buf_block_t*
|
||||
trx_undo_assign_low(trx_t *trx, trx_rseg_t *rseg, trx_undo_t **undo,
|
||||
mtr_t *mtr, dberr_t *err)
|
||||
trx_undo_assign_low(mtr_t *mtr, dberr_t *err, trx_rseg_t *rseg,
|
||||
trx_undo_t **undo)
|
||||
MY_ATTRIBUTE((nonnull, warn_unused_result));
|
||||
|
||||
/** Set the state of the undo log segment at a XA PREPARE or XA ROLLBACK.
|
||||
@param[in,out] trx transaction
|
||||
@param[in,out] undo undo log
|
||||
@param[in] rollback false=XA PREPARE, true=XA ROLLBACK
|
||||
@param[in,out] mtr mini-transaction */
|
||||
void trx_undo_set_state_at_prepare(trx_t *trx, trx_undo_t *undo, bool rollback,
|
||||
mtr_t *mtr)
|
||||
MY_ATTRIBUTE((nonnull));
|
||||
void trx_undo_set_state_at_prepare(trx_undo_t *undo, bool rollback, mtr_t *mtr)
|
||||
noexcept MY_ATTRIBUTE((nonnull));
|
||||
|
||||
/** At shutdown, frees the undo logs of a transaction. */
|
||||
void
|
||||
@@ -312,8 +306,8 @@ class UndorecApplier
|
||||
mtr_t mtr;
|
||||
|
||||
public:
|
||||
UndorecApplier(page_id_t page_id, trx_id_t trx_id) :
|
||||
page_id(page_id), trx_id(trx_id), heap(mem_heap_create(100))
|
||||
UndorecApplier(page_id_t page_id, trx_t &trx) :
|
||||
page_id(page_id), trx_id(trx.id), heap(mem_heap_create(100)), mtr(&trx)
|
||||
{
|
||||
}
|
||||
|
||||
|
||||
@@ -562,7 +562,7 @@ static void wsrep_assert_valid_bf_bf_wait(const lock_t *lock, const trx_t *trx,
|
||||
<< ((type_mode & LOCK_INSERT_INTENTION) ? " INSERT INTENTION " : " ")
|
||||
<< ((type_mode & LOCK_X) ? " LOCK_X " : " LOCK_S ");
|
||||
|
||||
mtr_t mtr;
|
||||
mtr_t mtr{nullptr};
|
||||
|
||||
ib::error() << "Conflicting lock on table: "
|
||||
<< lock->index->table->name
|
||||
@@ -682,7 +682,7 @@ bool wsrep_is_BF_lock_timeout(const trx_t &trx)
|
||||
now.val - suspend_time.val);
|
||||
|
||||
if (!wait_lock->is_table()) {
|
||||
mtr_t mtr;
|
||||
mtr_t mtr{nullptr};
|
||||
lock_rec_print(stderr, wait_lock, mtr);
|
||||
} else {
|
||||
lock_table_print(stderr, wait_lock);
|
||||
@@ -1084,7 +1084,7 @@ void wsrep_report_error(const lock_t* victim_lock, const trx_t *bf_trx)
|
||||
{
|
||||
// We have conflicting BF-BF case, these threads
|
||||
// should not execute concurrently
|
||||
mtr_t mtr;
|
||||
mtr_t mtr{nullptr};
|
||||
WSREP_ERROR("BF request is not compatible with victim");
|
||||
WSREP_ERROR("BF requesting lock: ");
|
||||
lock_rec_print(stderr, bf_trx->lock.wait_lock, mtr);
|
||||
@@ -4843,7 +4843,7 @@ static bool lock_release_on_prepare_try(trx_t *trx, bool unlock_unmodified)
|
||||
DBUG_ASSERT(trx->state == TRX_STATE_PREPARED);
|
||||
|
||||
bool all_released= true;
|
||||
mtr_t mtr;
|
||||
mtr_t mtr{trx};
|
||||
rec_offs offsets_[REC_OFFS_NORMAL_SIZE];
|
||||
rec_offs *offsets= offsets_;
|
||||
rec_offs_init(offsets_);
|
||||
@@ -5023,7 +5023,7 @@ void lock_release_on_prepare(trx_t *trx)
|
||||
skip_try:
|
||||
#endif
|
||||
|
||||
mtr_t mtr;
|
||||
mtr_t mtr{trx};
|
||||
/* Reserve enough offsets for the key and PRIMARY KEY. */
|
||||
rec_offs offsets_[REC_OFFS_HEADER_SIZE + 2 * MAX_REF_PARTS + 1];
|
||||
rec_offs *offsets= offsets_;
|
||||
@@ -5395,7 +5395,7 @@ void lock_trx_print_wait_and_mvcc_state(FILE *file, const trx_t *trx,
|
||||
now.val - suspend_time.val);
|
||||
|
||||
if (!wait_lock->is_table()) {
|
||||
mtr_t mtr;
|
||||
mtr_t mtr{nullptr};
|
||||
lock_rec_print(file, wait_lock, mtr);
|
||||
} else {
|
||||
lock_table_print(file, wait_lock);
|
||||
@@ -5414,7 +5414,7 @@ lock_trx_print_locks(
|
||||
FILE* file, /*!< in/out: File to write */
|
||||
const trx_t* trx) /*!< in: current transaction */
|
||||
{
|
||||
mtr_t mtr;
|
||||
mtr_t mtr{nullptr};
|
||||
uint32_t i= 0;
|
||||
/* Iterate over the transaction's locks. */
|
||||
lock_sys.assert_locked();
|
||||
@@ -5852,7 +5852,7 @@ static void lock_rec_block_validate(const page_id_t page_id)
|
||||
this point. */
|
||||
|
||||
buf_block_t* block;
|
||||
mtr_t mtr;
|
||||
mtr_t mtr{nullptr};
|
||||
|
||||
/* Transactional locks should never refer to dropped
|
||||
tablespaces, because all DDL operations that would drop or
|
||||
@@ -7121,7 +7121,7 @@ namespace Deadlock
|
||||
|
||||
if (!lock.is_table())
|
||||
{
|
||||
mtr_t mtr;
|
||||
mtr_t mtr{lock.trx};
|
||||
lock_rec_print(lock_latest_err_file, &lock, mtr);
|
||||
|
||||
if (srv_print_all_deadlocks)
|
||||
|
||||
@@ -1072,7 +1072,7 @@ fil_space_t *recv_sys_t::recover_deferred(const recv_sys_t::map::iterator &p,
|
||||
|
||||
if (!p->first.page_no() && p->second.skip_read)
|
||||
{
|
||||
mtr_t mtr;
|
||||
mtr_t mtr{nullptr};
|
||||
ut_ad(!p->second.being_processed);
|
||||
p->second.being_processed= 1;
|
||||
lsn_t init_lsn= mlog_init.last(p->first);
|
||||
@@ -3520,7 +3520,7 @@ ATTRIBUTE_COLD void recv_sys_t::set_corrupt_fs() noexcept
|
||||
@return whether the page was recovered correctly */
|
||||
bool recv_recover_page(fil_space_t* space, buf_page_t* bpage)
|
||||
{
|
||||
mtr_t mtr;
|
||||
mtr_t mtr{nullptr};
|
||||
mtr.start();
|
||||
mtr.set_log_mode(MTR_LOG_NO_REDO);
|
||||
|
||||
@@ -3573,7 +3573,7 @@ void IORequest::fake_read_complete(os_offset_t offset) const noexcept
|
||||
ut_ad(recv_recovery_is_on());
|
||||
ut_ad(offset);
|
||||
|
||||
mtr_t mtr;
|
||||
mtr_t mtr{nullptr};
|
||||
mtr.start();
|
||||
mtr.set_log_mode(MTR_LOG_NO_REDO);
|
||||
|
||||
@@ -3906,7 +3906,7 @@ recv_sys_t::recover(const page_id_t page_id, mtr_t *mtr, dberr_t *err)
|
||||
buf_block_t *free_block= buf_LRU_get_free_block(have_no_mutex);
|
||||
buf_block_t *block;
|
||||
{
|
||||
mtr_t local_mtr;
|
||||
mtr_t local_mtr{nullptr};
|
||||
block= recover_low(p, local_mtr, free_block, init_lsn);
|
||||
}
|
||||
p->second.being_processed= -1;
|
||||
|
||||
@@ -34,8 +34,8 @@ Created 11/26/1995 Heikki Tuuri
|
||||
#endif
|
||||
#include "btr0cur.h"
|
||||
#include "srv0start.h"
|
||||
#include "trx0trx.h"
|
||||
#include "log.h"
|
||||
#include "mariadb_stats.h"
|
||||
#include "my_cpu.h"
|
||||
|
||||
#ifdef HAVE_PMEM
|
||||
@@ -174,7 +174,7 @@ inline void buf_pool_t::insert_into_flush_list(buf_page_t *prev,
|
||||
block->page.set_oldest_modification(lsn);
|
||||
}
|
||||
|
||||
mtr_t::mtr_t()= default;
|
||||
mtr_t::mtr_t(trx_t *trx) : trx(trx) {}
|
||||
mtr_t::~mtr_t()= default;
|
||||
|
||||
/** Start a mini-transaction. */
|
||||
@@ -183,7 +183,9 @@ void mtr_t::start()
|
||||
ut_ad(m_memo.empty());
|
||||
ut_ad(!m_freed_pages);
|
||||
ut_ad(!m_freed_space);
|
||||
MEM_CHECK_DEFINED(&trx, sizeof trx);
|
||||
MEM_UNDEFINED(this, sizeof *this);
|
||||
MEM_MAKE_DEFINED(&trx, sizeof trx);
|
||||
MEM_MAKE_DEFINED(&m_memo, sizeof m_memo);
|
||||
MEM_MAKE_DEFINED(&m_freed_space, sizeof m_freed_space);
|
||||
MEM_MAKE_DEFINED(&m_freed_pages, sizeof m_freed_pages);
|
||||
@@ -452,7 +454,9 @@ void mtr_t::commit_log(mtr_t *mtr, std::pair<lsn_t,page_flush_ahead> lsns)
|
||||
mtr->m_memo.clear();
|
||||
}
|
||||
|
||||
mariadb_increment_pages_updated(modified);
|
||||
if (modified != 0 && mtr->trx)
|
||||
if (ha_handler_stats *stats= mtr->trx->active_handler_stats)
|
||||
stats->pages_updated+= modified;
|
||||
|
||||
if (UNIV_UNLIKELY(lsns.second != PAGE_FLUSH_NO))
|
||||
buf_flush_ahead(mtr->m_commit_lsn, lsns.second == PAGE_FLUSH_SYNC);
|
||||
@@ -515,9 +519,6 @@ void mtr_t::rollback_to_savepoint(ulint begin, ulint end)
|
||||
{
|
||||
const mtr_memo_slot_t &slot= m_memo[s];
|
||||
ut_ad(slot.object);
|
||||
/* This is intended for releasing latches on indexes or unmodified
|
||||
buffer pool pages. */
|
||||
ut_ad(slot.type <= MTR_MEMO_SX_LOCK);
|
||||
ut_ad(!(slot.type & MTR_MEMO_MODIFY));
|
||||
slot.release();
|
||||
}
|
||||
|
||||
@@ -300,7 +300,7 @@ struct fil_iterator_t {
|
||||
class RecIterator {
|
||||
public:
|
||||
/** Default constructor */
|
||||
RecIterator() UNIV_NOTHROW
|
||||
RecIterator() noexcept : m_mtr{nullptr}
|
||||
{
|
||||
memset(&m_cur, 0x0, sizeof(m_cur));
|
||||
/* Make page_cur_delete_rec() happy. */
|
||||
@@ -396,7 +396,7 @@ public:
|
||||
trx_t* trx,
|
||||
dict_index_t* index) UNIV_NOTHROW
|
||||
:
|
||||
m_trx(trx),
|
||||
m_mtr{trx},
|
||||
m_index(index),
|
||||
m_n_rows(0)
|
||||
{
|
||||
@@ -444,7 +444,6 @@ protected:
|
||||
IndexPurge &operator=(const IndexPurge&);
|
||||
|
||||
private:
|
||||
trx_t* m_trx; /*!< User transaction */
|
||||
mtr_t m_mtr; /*!< Mini-transaction */
|
||||
btr_pcur_t m_pcur; /*!< Persistent cursor */
|
||||
dict_index_t* m_index; /*!< Index to be processed */
|
||||
@@ -1647,7 +1646,7 @@ dberr_t IndexPurge::next() noexcept
|
||||
|
||||
if (!btr_pcur_is_after_last_on_page(&m_pcur)) {
|
||||
return(DB_SUCCESS);
|
||||
} else if (trx_is_interrupted(m_trx)) {
|
||||
} else if (trx_is_interrupted(m_mtr.trx)) {
|
||||
/* Check after every page because the check
|
||||
is expensive. */
|
||||
return(DB_INTERRUPTED);
|
||||
@@ -2191,7 +2190,7 @@ dberr_t PageConverter::operator()(buf_block_t* block) UNIV_NOTHROW
|
||||
/* If we already had an old page with matching number in the buffer
|
||||
pool, evict it now, because we no longer evict the pages on
|
||||
DISCARD TABLESPACE. */
|
||||
if (buf_block_t *b= buf_pool.page_fix(block->page.id(), nullptr,
|
||||
if (buf_block_t *b= buf_pool.page_fix(block->page.id(), nullptr, nullptr,
|
||||
buf_pool_t::FIX_ALSO_FREED))
|
||||
{
|
||||
ut_ad(!b->page.oldest_modification());
|
||||
@@ -2325,7 +2324,7 @@ row_import_cleanup(row_prebuilt_t* prebuilt,
|
||||
}
|
||||
|
||||
btr_cur_t cur;
|
||||
mtr_t mtr;
|
||||
mtr_t mtr{prebuilt->trx};
|
||||
mtr.start();
|
||||
err = cur.open_leaf(false, dict_table_get_first_index(table),
|
||||
BTR_SEARCH_LEAF, &mtr);
|
||||
@@ -2404,7 +2403,7 @@ row_import_adjust_root_pages_of_secondary_indexes(
|
||||
/* Update the Btree segment headers for index node and
|
||||
leaf nodes in the root page. Set the new space id. */
|
||||
|
||||
err = btr_root_adjust_on_import(index);
|
||||
err = btr_root_adjust_on_import(trx, index);
|
||||
} else {
|
||||
ib::warn() << "Skip adjustment of root pages for"
|
||||
" index " << index->name << ".";
|
||||
@@ -4623,7 +4622,8 @@ static void row_import_autoinc(dict_table_t *table, row_prebuilt_t *prebuilt,
|
||||
|
||||
if (autoinc)
|
||||
{
|
||||
btr_write_autoinc(dict_table_get_first_index(table), autoinc - 1);
|
||||
btr_write_autoinc(prebuilt->trx,
|
||||
dict_table_get_first_index(table), autoinc - 1);
|
||||
autoinc_set:
|
||||
table->autoinc= autoinc;
|
||||
sql_print_information("InnoDB: %.*sQ.%sQ autoinc value set to " UINT64PF,
|
||||
@@ -4692,7 +4692,7 @@ dberr_t innodb_insert_hidden_fts_col(dict_table_t* table,
|
||||
}
|
||||
pars_info_t *info= pars_info_create();
|
||||
pars_info_add_ull_literal(info, "id", table->id);
|
||||
dict_hdr_get_new_id(NULL, &fts_idx->id, NULL);
|
||||
dict_hdr_get_new_id(trx, NULL, &fts_idx->id, NULL);
|
||||
pars_info_add_ull_literal(info, "idx_id", fts_idx->id);
|
||||
pars_info_add_int4_literal(info, "pos", fts_pos);
|
||||
pars_info_add_int4_literal(info, "space", fts_idx->table->space_id);
|
||||
@@ -4764,9 +4764,9 @@ row_import_for_mysql(
|
||||
|
||||
/* TODO: Do not write any undo log for the IMPORT cleanup. */
|
||||
{
|
||||
mtr_t mtr;
|
||||
mtr_t mtr{trx};
|
||||
mtr.start();
|
||||
trx_undo_assign(trx, &err, &mtr);
|
||||
trx_undo_assign(&mtr, &err);
|
||||
mtr.commit();
|
||||
}
|
||||
|
||||
@@ -4966,7 +4966,7 @@ import_error:
|
||||
/* Update the Btree segment headers for index node and
|
||||
leaf nodes in the root page. Set the new space id. */
|
||||
|
||||
err = btr_root_adjust_on_import(index);
|
||||
err = btr_root_adjust_on_import(trx, index);
|
||||
|
||||
DBUG_EXECUTE_IF("ib_import_cluster_root_adjust_failure",
|
||||
err = DB_CORRUPTION;);
|
||||
|
||||
@@ -1473,8 +1473,8 @@ row_ins_check_foreign_constraint(
|
||||
ulint n_fields_cmp;
|
||||
btr_pcur_t pcur;
|
||||
int cmp;
|
||||
mtr_t mtr;
|
||||
trx_t* trx = thr_get_trx(thr);
|
||||
mtr_t mtr{trx};
|
||||
mem_heap_t* heap = NULL;
|
||||
rec_offs offsets_[REC_OFFS_NORMAL_SIZE];
|
||||
rec_offs* offsets = offsets_;
|
||||
@@ -1698,7 +1698,7 @@ row_ins_check_foreign_constraint(
|
||||
vers_history_row(rec,
|
||||
offsets);
|
||||
} else if (check_index->
|
||||
vers_history_row(rec,
|
||||
vers_history_row(&mtr, rec,
|
||||
history_row)) {
|
||||
break;
|
||||
}
|
||||
@@ -2024,9 +2024,9 @@ row_ins_dupl_error_with_rec(
|
||||
@retval DB_SUCCESS on success
|
||||
@retval DB_FOREIGN_DUPLICATE_KEY if a history row was inserted by trx */
|
||||
static dberr_t vers_row_same_trx(dict_index_t* index, const rec_t* rec,
|
||||
const trx_t& trx)
|
||||
trx_t *trx)
|
||||
{
|
||||
mtr_t mtr;
|
||||
mtr_t mtr{trx};
|
||||
dberr_t ret= DB_SUCCESS;
|
||||
dict_index_t *clust_index= dict_table_get_first_index(index->table);
|
||||
ut_ad(index != clust_index);
|
||||
@@ -2051,7 +2051,7 @@ static dberr_t vers_row_same_trx(dict_index_t* index, const rec_t* rec,
|
||||
clust_index->n_uniq, &trx_id_len);
|
||||
ut_ad(trx_id_len == DATA_TRX_ID_LEN);
|
||||
|
||||
if (trx.id == trx_read_trx_id(trx_id))
|
||||
if (trx->id == trx_read_trx_id(trx_id))
|
||||
ret= DB_FOREIGN_DUPLICATE_KEY;
|
||||
}
|
||||
|
||||
@@ -2180,7 +2180,7 @@ row_ins_scan_sec_index_for_duplicate(
|
||||
if (!index->table->versioned()) {
|
||||
} else if (dberr_t e =
|
||||
vers_row_same_trx(index, rec,
|
||||
*trx)) {
|
||||
trx)) {
|
||||
err = e;
|
||||
goto end_scan;
|
||||
}
|
||||
@@ -2511,8 +2511,8 @@ of a clustered index entry.
|
||||
@param[in] big_rec externally stored fields
|
||||
@param[in,out] offsets rec_get_offsets()
|
||||
@param[in,out] heap memory heap
|
||||
@param[in] thd client connection, or NULL
|
||||
@param[in] index clustered index
|
||||
@param[in] trx transaction
|
||||
@return error code
|
||||
@retval DB_SUCCESS
|
||||
@retval DB_OUT_OF_FILE_SPACE */
|
||||
@@ -2524,16 +2524,16 @@ row_ins_index_entry_big_rec(
|
||||
rec_offs* offsets,
|
||||
mem_heap_t** heap,
|
||||
dict_index_t* index,
|
||||
const void* thd __attribute__((unused)))
|
||||
trx_t* trx)
|
||||
{
|
||||
mtr_t mtr;
|
||||
mtr_t mtr{trx};
|
||||
btr_pcur_t pcur;
|
||||
rec_t* rec;
|
||||
|
||||
pcur.btr_cur.page_cur.index = index;
|
||||
ut_ad(index->is_primary());
|
||||
|
||||
DEBUG_SYNC_C_IF_THD(thd, "before_row_ins_extern_latch");
|
||||
DEBUG_SYNC_C_IF_THD(trx->mysql_thd, "before_row_ins_extern_latch");
|
||||
|
||||
mtr.start();
|
||||
if (index->table->is_temporary()) {
|
||||
@@ -2552,10 +2552,10 @@ row_ins_index_entry_big_rec(
|
||||
offsets = rec_get_offsets(rec, index, offsets, index->n_core_fields,
|
||||
ULINT_UNDEFINED, heap);
|
||||
|
||||
DEBUG_SYNC_C_IF_THD(thd, "before_row_ins_extern");
|
||||
DEBUG_SYNC_C_IF_THD(trx->mysql_thd, "before_row_ins_extern");
|
||||
error = btr_store_big_rec_extern_fields(
|
||||
&pcur, offsets, big_rec, &mtr, BTR_STORE_INSERT);
|
||||
DEBUG_SYNC_C_IF_THD(thd, "after_row_ins_extern");
|
||||
DEBUG_SYNC_C_IF_THD(trx->mysql_thd, "after_row_ins_extern");
|
||||
|
||||
mtr.commit();
|
||||
|
||||
@@ -2659,13 +2659,13 @@ row_ins_clust_index_entry_low(
|
||||
btr_pcur_t pcur;
|
||||
dberr_t err = DB_SUCCESS;
|
||||
big_rec_t* big_rec = NULL;
|
||||
mtr_t mtr;
|
||||
uint64_t auto_inc = 0;
|
||||
mem_heap_t* offsets_heap = NULL;
|
||||
rec_offs offsets_[REC_OFFS_NORMAL_SIZE];
|
||||
rec_offs* offsets = offsets_;
|
||||
rec_offs_init(offsets_);
|
||||
trx_t* trx = thr_get_trx(thr);
|
||||
mtr_t mtr{trx};
|
||||
buf_block_t* block;
|
||||
|
||||
DBUG_ENTER("row_ins_clust_index_entry_low");
|
||||
@@ -2986,7 +2986,7 @@ do_insert:
|
||||
log_write_up_to(mtr.commit_lsn(), true););
|
||||
err = row_ins_index_entry_big_rec(
|
||||
entry, big_rec, offsets, &offsets_heap, index,
|
||||
trx->mysql_thd);
|
||||
trx);
|
||||
dtuple_convert_back_big_rec(index, entry, big_rec);
|
||||
}
|
||||
}
|
||||
@@ -3045,7 +3045,8 @@ row_ins_sec_index_entry_low(
|
||||
btr_latch_mode search_mode = mode;
|
||||
dberr_t err;
|
||||
ulint n_unique;
|
||||
mtr_t mtr;
|
||||
trx_t*const trx{thr_get_trx(thr)};
|
||||
mtr_t mtr{trx};
|
||||
rec_offs offsets_[REC_OFFS_NORMAL_SIZE];
|
||||
rec_offs* offsets = offsets_;
|
||||
rec_offs_init(offsets_);
|
||||
@@ -3056,7 +3057,7 @@ row_ins_sec_index_entry_low(
|
||||
|
||||
cursor.rtr_info = NULL;
|
||||
cursor.page_cur.index = index;
|
||||
ut_ad(thr_get_trx(thr)->id != 0);
|
||||
ut_ad(trx->id != 0);
|
||||
|
||||
mtr.start();
|
||||
|
||||
@@ -3110,7 +3111,7 @@ row_ins_sec_index_entry_low(
|
||||
|
||||
if (err != DB_SUCCESS) {
|
||||
if (err == DB_DECRYPTION_FAILED) {
|
||||
innodb_decryption_failed(thr_get_trx(thr)->mysql_thd,
|
||||
innodb_decryption_failed(trx->mysql_thd,
|
||||
index->table);
|
||||
}
|
||||
goto func_exit;
|
||||
@@ -3147,8 +3148,7 @@ row_ins_sec_index_entry_low(
|
||||
break;
|
||||
case DB_DUPLICATE_KEY:
|
||||
if (!index->is_committed()) {
|
||||
ut_ad(!thr_get_trx(thr)
|
||||
->dict_operation_lock_mode);
|
||||
ut_ad(!trx->dict_operation_lock_mode);
|
||||
index->type |= DICT_CORRUPT;
|
||||
/* Do not return any error to the
|
||||
caller. The duplicate will be reported
|
||||
@@ -3966,7 +3966,7 @@ const rec_t *row_search_get_max_rec(dict_index_t *index, mtr_t *mtr) noexcept
|
||||
uint64_t row_search_max_autoinc(dict_index_t *index) noexcept
|
||||
{
|
||||
uint64_t value= 0;
|
||||
mtr_t mtr;
|
||||
mtr_t mtr{nullptr};
|
||||
mtr.start();
|
||||
if (const rec_t *rec= row_search_get_max_rec(index, &mtr))
|
||||
value= row_read_autoinc(*index, rec);
|
||||
|
||||
@@ -253,11 +253,12 @@ row_log_block_free(
|
||||
/** Logs an operation to a secondary index that is (or was) being created.
|
||||
@param index index, S or X latched
|
||||
@param tuple index tuple
|
||||
@param trx transaction
|
||||
@param trx_id transaction ID for insert, or 0 for delete
|
||||
@retval false if row_log_apply() failure happens
|
||||
or true otherwise */
|
||||
bool row_log_online_op(dict_index_t *index, const dtuple_t *tuple,
|
||||
trx_id_t trx_id)
|
||||
static bool row_log_online_op(dict_index_t *index, const dtuple_t *tuple,
|
||||
trx_t *trx, trx_id_t trx_id)
|
||||
{
|
||||
byte* b;
|
||||
ulint extra_size;
|
||||
@@ -350,7 +351,7 @@ start_log:
|
||||
apply the online log for the completed index */
|
||||
index->lock.s_unlock();
|
||||
dberr_t error= row_log_apply(
|
||||
log->alter_trx, index, nullptr, nullptr);
|
||||
trx, index, nullptr, nullptr);
|
||||
index->lock.s_lock(SRW_LOCK_CALL);
|
||||
if (error != DB_SUCCESS) {
|
||||
/* Mark all newly added indexes
|
||||
@@ -1742,12 +1743,13 @@ row_log_table_apply_delete(
|
||||
mem_heap_t* offsets_heap, /*!< in/out: memory heap
|
||||
that can be emptied */
|
||||
mem_heap_t* heap, /*!< in/out: memory heap */
|
||||
const row_log_t* log) /*!< in: online log */
|
||||
const row_merge_dup_t* dup) /*!< in: context */
|
||||
{
|
||||
const row_log_t* const log{dup->index->online_log};
|
||||
dict_table_t* new_table = log->table;
|
||||
dict_index_t* index = dict_table_get_first_index(new_table);
|
||||
dtuple_t* old_pk;
|
||||
mtr_t mtr;
|
||||
mtr_t mtr{dup->trx};
|
||||
btr_pcur_t pcur;
|
||||
rec_offs* offsets;
|
||||
|
||||
@@ -1864,7 +1866,7 @@ row_log_table_apply_update(
|
||||
row_log_t* log = dup->index->online_log;
|
||||
const dtuple_t* row;
|
||||
dict_index_t* index = dict_table_get_first_index(log->table);
|
||||
mtr_t mtr;
|
||||
mtr_t mtr{dup->trx};
|
||||
btr_pcur_t pcur;
|
||||
dberr_t error;
|
||||
ulint n_index = 0;
|
||||
@@ -2214,7 +2216,7 @@ row_log_table_apply_op(
|
||||
|
||||
*error = row_log_table_apply_delete(
|
||||
new_trx_id_col,
|
||||
mrec, offsets, offsets_heap, heap, log);
|
||||
mrec, offsets, offsets_heap, heap, dup);
|
||||
break;
|
||||
|
||||
case ROW_T_UPDATE:
|
||||
@@ -2816,10 +2818,11 @@ row_log_table_apply(
|
||||
{
|
||||
dberr_t error;
|
||||
dict_index_t* clust_index;
|
||||
trx_t* const trx{thr_get_trx(thr)};
|
||||
|
||||
thr_get_trx(thr)->error_key_num = 0;
|
||||
trx->error_key_num = 0;
|
||||
DBUG_EXECUTE_IF("innodb_trx_duplicates",
|
||||
thr_get_trx(thr)->duplicates = TRX_DUP_REPLACE;);
|
||||
trx->duplicates = TRX_DUP_REPLACE;);
|
||||
|
||||
stage->begin_phase_log_table();
|
||||
|
||||
@@ -2841,7 +2844,7 @@ row_log_table_apply(
|
||||
error = DB_ERROR;
|
||||
} else {
|
||||
row_merge_dup_t dup = {
|
||||
clust_index, table,
|
||||
clust_index, trx, table,
|
||||
clust_index->online_log->col_map, 0
|
||||
};
|
||||
|
||||
@@ -2853,8 +2856,7 @@ row_log_table_apply(
|
||||
}
|
||||
|
||||
clust_index->lock.x_unlock();
|
||||
DBUG_EXECUTE_IF("innodb_trx_duplicates",
|
||||
thr_get_trx(thr)->duplicates = 0;);
|
||||
DBUG_EXECUTE_IF("innodb_trx_duplicates", trx->duplicates = 0;);
|
||||
|
||||
return(error);
|
||||
}
|
||||
@@ -3032,7 +3034,7 @@ row_log_apply_op_low(
|
||||
trx_id_t trx_id, /*!< in: transaction identifier */
|
||||
const dtuple_t* entry) /*!< in: row */
|
||||
{
|
||||
mtr_t mtr;
|
||||
mtr_t mtr{dup->trx};
|
||||
btr_cur_t cursor;
|
||||
rec_offs* offsets = NULL;
|
||||
|
||||
@@ -3704,7 +3706,7 @@ func_exit:
|
||||
}
|
||||
|
||||
/** Apply the row log to the index upon completing index creation.
|
||||
@param[in] trx transaction (for checking if the operation was
|
||||
@param[in,out] trx transaction (for checking if the operation was
|
||||
interrupted)
|
||||
@param[in,out] index secondary index
|
||||
@param[in,out] table MySQL table (for reporting duplicates)
|
||||
@@ -3715,13 +3717,13 @@ when row log has been applied by DML thread.
|
||||
@return DB_SUCCESS, or error code on failure */
|
||||
dberr_t
|
||||
row_log_apply(
|
||||
const trx_t* trx,
|
||||
trx_t* trx,
|
||||
dict_index_t* index,
|
||||
struct TABLE* table,
|
||||
ut_stage_alter_t* stage)
|
||||
{
|
||||
dberr_t error;
|
||||
row_merge_dup_t dup = { index, table, NULL, 0 };
|
||||
row_merge_dup_t dup = { index, trx, table, nullptr, 0 };
|
||||
DBUG_ENTER("row_log_apply");
|
||||
|
||||
ut_ad(dict_index_is_online_ddl(index)
|
||||
@@ -3949,7 +3951,7 @@ void UndorecApplier::log_insert(const dtuple_t &tuple,
|
||||
dtuple_t *entry= row_build_index_entry_low(row, ext, index,
|
||||
heap, ROW_BUILD_NORMAL);
|
||||
entry->copy_field_types(*index);
|
||||
success= row_log_online_op(index, entry, trx_id);
|
||||
success= row_log_online_op(index, entry, mtr.trx, trx_id);
|
||||
}
|
||||
|
||||
index->lock.s_unlock();
|
||||
@@ -4074,7 +4076,7 @@ void UndorecApplier::log_update(const dtuple_t &tuple,
|
||||
|
||||
old_entry->copy_field_types(*index);
|
||||
|
||||
success= row_log_online_op(index, old_entry, 0);
|
||||
success= row_log_online_op(index, old_entry, mtr.trx, 0);
|
||||
|
||||
dtuple_t *new_entry= row_build_index_entry_low(
|
||||
row, new_ext, index, heap, ROW_BUILD_NORMAL);
|
||||
@@ -4082,7 +4084,7 @@ void UndorecApplier::log_update(const dtuple_t &tuple,
|
||||
new_entry->copy_field_types(*index);
|
||||
|
||||
if (success)
|
||||
success= row_log_online_op(index, new_entry, trx_id);
|
||||
success= row_log_online_op(index, new_entry, mtr.trx, trx_id);
|
||||
}
|
||||
else
|
||||
{
|
||||
@@ -4091,7 +4093,7 @@ void UndorecApplier::log_update(const dtuple_t &tuple,
|
||||
|
||||
old_entry->copy_field_types(*index);
|
||||
|
||||
success= row_log_online_op(index, old_entry, 0);
|
||||
success= row_log_online_op(index, old_entry, mtr.trx, 0);
|
||||
}
|
||||
}
|
||||
next_index:
|
||||
|
||||
@@ -108,7 +108,7 @@ public:
|
||||
big_rec_t* big_rec;
|
||||
rec_t* rec;
|
||||
btr_cur_t ins_cur;
|
||||
mtr_t mtr;
|
||||
mtr_t mtr{scan_mtr->trx};
|
||||
rtr_info_t rtr_info;
|
||||
rec_offs* ins_offsets = NULL;
|
||||
dberr_t error = DB_SUCCESS;
|
||||
@@ -246,6 +246,7 @@ public:
|
||||
#define FTS_PENDING_DOC_MEMORY_LIMIT 1000000
|
||||
|
||||
/** Insert sorted data tuples to the index.
|
||||
@param[in,out] trx transaction
|
||||
@param[in] index index to be inserted
|
||||
@param[in] old_table old table
|
||||
@param[in] fd file descriptor
|
||||
@@ -269,6 +270,7 @@ and then stage->inc() will be called for each record that is processed.
|
||||
static MY_ATTRIBUTE((warn_unused_result))
|
||||
dberr_t
|
||||
row_merge_insert_index_tuples(
|
||||
trx_t* trx,
|
||||
dict_index_t* index,
|
||||
const dict_table_t* old_table,
|
||||
const pfs_os_file_t& fd,
|
||||
@@ -1873,7 +1875,7 @@ row_merge_read_clustered_index(
|
||||
data for virtual column */
|
||||
btr_pcur_t pcur; /* Cursor on the clustered
|
||||
index */
|
||||
mtr_t mtr; /* Mini transaction */
|
||||
mtr_t mtr{trx}; /* Mini transaction */
|
||||
bool mtr_started = false;
|
||||
dberr_t err = DB_SUCCESS;/* Return code */
|
||||
ulint n_nonnull = 0; /* number of columns
|
||||
@@ -1924,7 +1926,7 @@ row_merge_read_clustered_index(
|
||||
merge_buf = static_cast<row_merge_buf_t**>(
|
||||
ut_malloc_nokey(n_index * sizeof *merge_buf));
|
||||
|
||||
row_merge_dup_t clust_dup = {index[0], table, col_map, 0};
|
||||
row_merge_dup_t clust_dup = {index[0], trx, table, col_map, 0};
|
||||
dfield_t* prev_fields = nullptr;
|
||||
const ulint n_uniq = dict_index_get_n_unique(index[0]);
|
||||
|
||||
@@ -2702,7 +2704,7 @@ write_buffers:
|
||||
}
|
||||
|
||||
err = row_merge_insert_index_tuples(
|
||||
index[i], old_table,
|
||||
trx, index[i], old_table,
|
||||
OS_FILE_CLOSED, NULL, buf,
|
||||
clust_btr_bulk,
|
||||
table_total_rows,
|
||||
@@ -2753,7 +2755,8 @@ write_buffers:
|
||||
}
|
||||
} else if (dict_index_is_unique(buf->index)) {
|
||||
row_merge_dup_t dup = {
|
||||
buf->index, table, col_map, 0};
|
||||
buf->index, trx, table,
|
||||
col_map, 0};
|
||||
|
||||
row_merge_buf_sort(buf, &dup);
|
||||
|
||||
@@ -2815,7 +2818,7 @@ write_buffers:
|
||||
BtrBulk btr_bulk(index[i], trx);
|
||||
|
||||
err = row_merge_insert_index_tuples(
|
||||
index[i], old_table,
|
||||
trx, index[i], old_table,
|
||||
OS_FILE_CLOSED, NULL, buf,
|
||||
&btr_bulk,
|
||||
table_total_rows,
|
||||
@@ -3687,6 +3690,7 @@ row_merge_mtuple_to_dtuple(
|
||||
static MY_ATTRIBUTE((warn_unused_result))
|
||||
dberr_t
|
||||
row_merge_insert_index_tuples(
|
||||
trx_t* trx,
|
||||
dict_index_t* index,
|
||||
const dict_table_t* old_table,
|
||||
const pfs_os_file_t& fd,
|
||||
@@ -3714,8 +3718,7 @@ row_merge_insert_index_tuples(
|
||||
double curr_progress = 0;
|
||||
dict_index_t* old_index = NULL;
|
||||
const mrec_t* mrec = NULL;
|
||||
mtr_t mtr;
|
||||
|
||||
mtr_t mtr{trx};
|
||||
|
||||
DBUG_ENTER("row_merge_insert_index_tuples");
|
||||
|
||||
@@ -4823,7 +4826,7 @@ row_merge_build_indexes(
|
||||
} else if (merge_files[k].fd != OS_FILE_CLOSED) {
|
||||
char buf[NAME_LEN + 1];
|
||||
row_merge_dup_t dup = {
|
||||
sort_idx, table, col_map, 0};
|
||||
sort_idx, trx, table, col_map, 0};
|
||||
|
||||
pct_cost = (COST_BUILD_INDEX_STATIC +
|
||||
(total_dynamic_cost
|
||||
@@ -4892,7 +4895,7 @@ row_merge_build_indexes(
|
||||
}
|
||||
|
||||
error = row_merge_insert_index_tuples(
|
||||
sort_idx, old_table,
|
||||
trx, sort_idx, old_table,
|
||||
merge_files[k].fd, block, NULL,
|
||||
&btr_bulk,
|
||||
merge_files[k].n_rec, pct_progress, pct_cost,
|
||||
@@ -5192,7 +5195,7 @@ dberr_t row_merge_bulk_t::load_one_row(trx_t *trx)
|
||||
dict_index_t *index= m_merge_buf[0].index;
|
||||
BtrBulk btr_bulk(index, trx);
|
||||
ut_ad(m_merge_buf[0].n_tuples == 1);
|
||||
dberr_t err= row_merge_insert_index_tuples(index, index->table,
|
||||
dberr_t err= row_merge_insert_index_tuples(trx, index, index->table,
|
||||
OS_FILE_CLOSED, nullptr,
|
||||
&m_merge_buf[0], &btr_bulk,
|
||||
0, 0, 0, nullptr,
|
||||
@@ -5203,7 +5206,7 @@ dberr_t row_merge_bulk_t::load_one_row(trx_t *trx)
|
||||
if (err != DB_SUCCESS)
|
||||
trx->error_info= index;
|
||||
else if (index->table->persistent_autoinc)
|
||||
btr_write_autoinc(index, 1);
|
||||
btr_write_autoinc(trx, index, 1);
|
||||
err= btr_bulk.finish(err);
|
||||
if (err == DB_SUCCESS && index->is_clust())
|
||||
index->table->stat_n_rows= 1;
|
||||
@@ -5259,7 +5262,7 @@ add_to_buf:
|
||||
|
||||
if (index->is_unique())
|
||||
{
|
||||
row_merge_dup_t dup{index, nullptr, nullptr, 0};
|
||||
row_merge_dup_t dup{index, trx, nullptr, nullptr, 0};
|
||||
row_merge_buf_sort(buf, &dup);
|
||||
if (dup.n_dup)
|
||||
{
|
||||
@@ -5301,7 +5304,7 @@ dberr_t row_merge_bulk_t::write_to_index(ulint index_no, trx_t *trx)
|
||||
dict_index_t *index= buf.index;
|
||||
dict_table_t *table= index->table;
|
||||
BtrBulk btr_bulk(index, trx);
|
||||
row_merge_dup_t dup = {index, nullptr, nullptr, 0};
|
||||
row_merge_dup_t dup = {index, trx, nullptr, nullptr, 0};
|
||||
|
||||
if (buf.n_tuples)
|
||||
{
|
||||
@@ -5326,7 +5329,7 @@ dberr_t row_merge_bulk_t::write_to_index(ulint index_no, trx_t *trx)
|
||||
{
|
||||
/* Data got fit in merge buffer. */
|
||||
err= row_merge_insert_index_tuples(
|
||||
index, table, OS_FILE_CLOSED, nullptr,
|
||||
trx, index, table, OS_FILE_CLOSED, nullptr,
|
||||
&buf, &btr_bulk, 0, 0, 0, nullptr, table->space_id, nullptr,
|
||||
m_blob_file.fd == OS_FILE_CLOSED ? nullptr : &m_blob_file);
|
||||
goto func_exit;
|
||||
@@ -5340,7 +5343,7 @@ dberr_t row_merge_bulk_t::write_to_index(ulint index_no, trx_t *trx)
|
||||
goto func_exit;
|
||||
|
||||
err= row_merge_insert_index_tuples(
|
||||
index, table, file->fd, m_block, nullptr,
|
||||
trx, index, table, file->fd, m_block, nullptr,
|
||||
&btr_bulk, 0, 0, 0, m_crypt_block, table->space_id,
|
||||
nullptr, &m_blob_file);
|
||||
|
||||
@@ -5348,7 +5351,7 @@ func_exit:
|
||||
if (err != DB_SUCCESS)
|
||||
trx->error_info= index;
|
||||
else if (index->is_primary() && table->persistent_autoinc)
|
||||
btr_write_autoinc(index, table->autoinc - 1);
|
||||
btr_write_autoinc(trx, index, table->autoinc - 1);
|
||||
err= btr_bulk.finish(err);
|
||||
if (err == DB_SUCCESS && index->is_clust())
|
||||
table->stat_n_rows= (file && file->fd != OS_FILE_CLOSED)
|
||||
|
||||
@@ -1755,7 +1755,7 @@ row_unlock_for_mysql(
|
||||
const rec_t* rec;
|
||||
dict_index_t* index;
|
||||
trx_id_t rec_trx_id;
|
||||
mtr_t mtr;
|
||||
mtr_t mtr{prebuilt->trx};
|
||||
btr_pcur_t* pcur = prebuilt->pcur;
|
||||
|
||||
mtr_start(&mtr);
|
||||
@@ -2216,7 +2216,7 @@ row_mysql_table_id_reassign(
|
||||
dberr_t err;
|
||||
pars_info_t* info = pars_info_create();
|
||||
|
||||
dict_hdr_get_new_id(new_id, NULL, NULL);
|
||||
dict_hdr_get_new_id(trx, new_id, NULL, NULL);
|
||||
|
||||
pars_info_add_ull_literal(info, "old_id", table->id);
|
||||
pars_info_add_ull_literal(info, "new_id", *new_id);
|
||||
@@ -2837,15 +2837,17 @@ row_rename_table_for_mysql(
|
||||
/* We only want to switch off some of the type checking in
|
||||
an ALTER TABLE, not in a RENAME. */
|
||||
dict_names_t fk_tables;
|
||||
|
||||
err = dict_load_foreigns(
|
||||
new_name, nullptr, trx->id,
|
||||
!old_is_tmp || trx->check_foreigns,
|
||||
fk == RENAME_ALTER_COPY
|
||||
? DICT_ERR_IGNORE_NONE
|
||||
: DICT_ERR_IGNORE_FK_NOKEY,
|
||||
fk_tables);
|
||||
|
||||
{
|
||||
mtr_t mtr{trx};
|
||||
err = dict_load_foreigns(mtr, new_name, nullptr,
|
||||
trx->id,
|
||||
!old_is_tmp
|
||||
|| trx->check_foreigns,
|
||||
fk == RENAME_ALTER_COPY
|
||||
? DICT_ERR_IGNORE_NONE
|
||||
: DICT_ERR_IGNORE_FK_NOKEY,
|
||||
fk_tables);
|
||||
}
|
||||
if (err != DB_SUCCESS) {
|
||||
if (old_is_tmp) {
|
||||
/* In case of copy alter, ignore the
|
||||
|
||||
@@ -129,7 +129,7 @@ retry:
|
||||
}
|
||||
}
|
||||
}
|
||||
mtr_t mtr;
|
||||
mtr_t mtr{node->trx};
|
||||
mtr.start();
|
||||
index->set_modified(mtr);
|
||||
log_free_check();
|
||||
@@ -287,8 +287,7 @@ stored in undo log
|
||||
@param[in] clust_offsets offsets on the cluster record
|
||||
@param[in] index the secondary index
|
||||
@param[in] ientry the secondary index entry
|
||||
@param[in] roll_ptr the rollback pointer for the purging record
|
||||
@param[in] trx_id trx id for the purging record
|
||||
@param[in] node purge node
|
||||
@param[in,out] mtr mini-transaction
|
||||
@param[in,out] v_row dtuple holding the virtual rows (if needed)
|
||||
@return true if matches, false otherwise */
|
||||
@@ -301,8 +300,7 @@ row_purge_vc_matches_cluster(
|
||||
rec_offs* clust_offsets,
|
||||
dict_index_t* index,
|
||||
const dtuple_t* ientry,
|
||||
roll_ptr_t roll_ptr,
|
||||
trx_id_t trx_id,
|
||||
const purge_node_t&node,
|
||||
mtr_t* mtr,
|
||||
dtuple_t** vrow)
|
||||
{
|
||||
@@ -365,7 +363,7 @@ row_purge_vc_matches_cluster(
|
||||
version, clust_index, clust_offsets);
|
||||
|
||||
ut_ad(cur_roll_ptr != 0);
|
||||
ut_ad(roll_ptr != 0);
|
||||
ut_ad(node.roll_ptr != 0);
|
||||
|
||||
trx_undo_prev_version_build(
|
||||
version, clust_index, clust_offsets,
|
||||
@@ -432,10 +430,10 @@ row_purge_vc_matches_cluster(
|
||||
}
|
||||
}
|
||||
|
||||
trx_id_t rec_trx_id = row_get_rec_trx_id(
|
||||
prev_version, clust_index, clust_offsets);
|
||||
|
||||
if (rec_trx_id < trx_id || roll_ptr == cur_roll_ptr) {
|
||||
if (node.roll_ptr == cur_roll_ptr
|
||||
|| row_get_rec_trx_id(
|
||||
prev_version, clust_index, clust_offsets)
|
||||
< node.trx_id) {
|
||||
break;
|
||||
}
|
||||
|
||||
@@ -571,8 +569,7 @@ static bool row_purge_is_unsafe(const purge_node_t &node,
|
||||
if (entry && row_purge_vc_matches_cluster(
|
||||
rec, entry,
|
||||
clust_index, clust_offsets,
|
||||
index, ientry, roll_ptr,
|
||||
trx_id, mtr, &vrow)) {
|
||||
index, ientry, node, mtr, &vrow)) {
|
||||
goto unsafe_to_purge;
|
||||
}
|
||||
}
|
||||
@@ -741,6 +738,7 @@ page latch.
|
||||
static bool row_purge_poss_sec(purge_node_t *node, dict_index_t *index,
|
||||
const dtuple_t *entry, mtr_t *mtr)
|
||||
{
|
||||
ut_ad(mtr->trx == node->trx);
|
||||
ut_ad(!index->is_clust());
|
||||
const auto savepoint= mtr->get_savepoint();
|
||||
bool can_delete= !row_purge_reposition_pcur(BTR_SEARCH_LEAF, node, mtr);
|
||||
@@ -789,15 +787,15 @@ static bool row_purge_remove_sec_if_poss_tree(purge_node_t *node,
|
||||
btr_pcur_t pcur;
|
||||
bool success = true;
|
||||
dberr_t err;
|
||||
mtr_t mtr;
|
||||
mtr_t mtr{node->trx};
|
||||
|
||||
log_free_check();
|
||||
#ifdef ENABLED_DEBUG_SYNC
|
||||
DBUG_EXECUTE_IF("enable_row_purge_sec_tree_sync",
|
||||
debug_sync_set_action(current_thd, STRING_WITH_LEN(
|
||||
debug_sync_set_action(node->trx->mysql_thd, STRING_WITH_LEN(
|
||||
"now SIGNAL "
|
||||
"purge_sec_tree_begin"));
|
||||
debug_sync_set_action(current_thd, STRING_WITH_LEN(
|
||||
debug_sync_set_action(node->trx->mysql_thd, STRING_WITH_LEN(
|
||||
"now WAIT_FOR "
|
||||
"purge_sec_tree_execute"));
|
||||
);
|
||||
@@ -892,7 +890,7 @@ static trx_id_t row_purge_remove_sec_if_poss_leaf(purge_node_t *node,
|
||||
dict_index_t *index,
|
||||
const dtuple_t *entry)
|
||||
{
|
||||
mtr_t mtr;
|
||||
mtr_t mtr{node->trx};
|
||||
btr_pcur_t pcur;
|
||||
trx_id_t page_max_trx_id = 0;
|
||||
|
||||
@@ -925,7 +923,7 @@ found:
|
||||
->not_redundant())) {
|
||||
row_purge_del_mark_error(pcur.btr_cur, *entry);
|
||||
mtr.commit();
|
||||
dict_set_corrupted(index, "purge");
|
||||
dict_set_corrupted(node->trx, index, "purge");
|
||||
goto cleanup;
|
||||
}
|
||||
|
||||
@@ -990,13 +988,14 @@ row_purge_remove_sec_if_poss(
|
||||
ut_a(--n_tries);
|
||||
}
|
||||
|
||||
/***********************************************************//**
|
||||
/**
|
||||
Purges a delete marking of a record.
|
||||
@param node row purge node
|
||||
@retval true if the row was not found, or it was successfully removed
|
||||
@retval false the purge needs to be suspended because of
|
||||
running out of file space */
|
||||
static MY_ATTRIBUTE((nonnull, warn_unused_result))
|
||||
bool row_purge_del_mark(purge_node_t *node)
|
||||
bool row_purge_del_mark(purge_node_t *node) noexcept
|
||||
{
|
||||
if (node->index)
|
||||
{
|
||||
@@ -1024,7 +1023,7 @@ bool row_purge_del_mark(purge_node_t *node)
|
||||
#ifdef ENABLED_DEBUG_SYNC
|
||||
DBUG_EXECUTE_IF("enable_row_purge_del_mark_exit_sync_point",
|
||||
debug_sync_set_action
|
||||
(current_thd,
|
||||
(node->trx->mysql_thd,
|
||||
STRING_WITH_LEN("now SIGNAL row_purge_del_mark_finished"));
|
||||
);
|
||||
#endif
|
||||
@@ -1037,11 +1036,8 @@ Purges an update of an existing record. Also purges an update of a delete
|
||||
marked record if that record contained an externally stored field. */
|
||||
static
|
||||
void
|
||||
row_purge_upd_exist_or_extern_func(
|
||||
/*===============================*/
|
||||
#ifdef UNIV_DEBUG
|
||||
row_purge_upd_exist_or_extern(
|
||||
const que_thr_t*thr, /*!< in: query thread */
|
||||
#endif /* UNIV_DEBUG */
|
||||
purge_node_t* node, /*!< in: row purge node */
|
||||
const trx_undo_rec_t* undo_rec) /*!< in: record to purge */
|
||||
{
|
||||
@@ -1073,7 +1069,8 @@ row_purge_upd_exist_or_extern_func(
|
||||
dtuple_t* entry = row_build_index_entry_low(
|
||||
node->row, NULL, node->index,
|
||||
heap, ROW_BUILD_FOR_PURGE);
|
||||
row_purge_remove_sec_if_poss(node, node->index, entry);
|
||||
row_purge_remove_sec_if_poss(
|
||||
node, node->index, entry);
|
||||
|
||||
ut_ad(node->table);
|
||||
|
||||
@@ -1084,7 +1081,7 @@ row_purge_upd_exist_or_extern_func(
|
||||
mem_heap_free(heap);
|
||||
|
||||
skip_secondaries:
|
||||
mtr_t mtr;
|
||||
mtr_t mtr{node->trx};
|
||||
dict_index_t* index = dict_table_get_first_index(node->table);
|
||||
/* Free possible externally stored fields */
|
||||
for (ulint i = 0; i < upd_get_n_fields(node->update); i++) {
|
||||
@@ -1163,14 +1160,6 @@ skip_secondaries:
|
||||
}
|
||||
}
|
||||
|
||||
#ifdef UNIV_DEBUG
|
||||
# define row_purge_upd_exist_or_extern(thr,node,undo_rec) \
|
||||
row_purge_upd_exist_or_extern_func(thr,node,undo_rec)
|
||||
#else /* UNIV_DEBUG */
|
||||
# define row_purge_upd_exist_or_extern(thr,node,undo_rec) \
|
||||
row_purge_upd_exist_or_extern_func(node,undo_rec)
|
||||
#endif /* UNIV_DEBUG */
|
||||
|
||||
/** Build a partial row from an update undo log record for purge.
|
||||
Any columns which occur as ordering in any index of the table are present.
|
||||
Any missing columns are indicated by col->mtype == DATA_MISSING.
|
||||
@@ -1419,12 +1408,10 @@ row_purge_parse_undo_rec(
|
||||
@return true if purged, false if skipped */
|
||||
static MY_ATTRIBUTE((nonnull, warn_unused_result))
|
||||
bool
|
||||
row_purge_record_func(
|
||||
row_purge_record(
|
||||
purge_node_t* node,
|
||||
const trx_undo_rec_t* undo_rec,
|
||||
#if defined UNIV_DEBUG || defined WITH_WSREP
|
||||
const que_thr_t*thr,
|
||||
#endif /* UNIV_DEBUG || WITH_WSREP */
|
||||
bool updated_extern)
|
||||
{
|
||||
ut_ad(!node->found_clust);
|
||||
@@ -1473,14 +1460,6 @@ row_purge_record_func(
|
||||
return(purged);
|
||||
}
|
||||
|
||||
#if defined UNIV_DEBUG || defined WITH_WSREP
|
||||
# define row_purge_record(node,undo_rec,thr,updated_extern) \
|
||||
row_purge_record_func(node,undo_rec,thr,updated_extern)
|
||||
#else /* UNIV_DEBUG || WITH_WSREP */
|
||||
# define row_purge_record(node,undo_rec,thr,updated_extern) \
|
||||
row_purge_record_func(node,undo_rec,updated_extern)
|
||||
#endif /* UNIV_DEBUG || WITH_WSREP */
|
||||
|
||||
/***********************************************************//**
|
||||
Fetches an undo log record and does the purge for the recorded operation.
|
||||
If none left, or the current purge completed, returns the control to the
|
||||
|
||||
@@ -880,7 +880,7 @@ row_sel_build_committed_vers_for_mysql(
|
||||
rec_offs_size(*offsets));
|
||||
}
|
||||
|
||||
row_vers_build_for_semi_consistent_read(prebuilt->trx,
|
||||
row_vers_build_for_semi_consistent_read(
|
||||
rec, mtr, clust_index, offsets, offset_heap,
|
||||
prebuilt->old_vers_heap, old_vers, vrow);
|
||||
}
|
||||
@@ -1669,7 +1669,7 @@ row_sel(
|
||||
{
|
||||
dict_index_t* index;
|
||||
plan_t* plan;
|
||||
mtr_t mtr;
|
||||
mtr_t mtr{thr_get_trx(thr)};
|
||||
ibool moved;
|
||||
rec_t* rec;
|
||||
rec_t* old_vers;
|
||||
@@ -4521,7 +4521,7 @@ early_not_found:
|
||||
/* if the query is a plain locking SELECT, and the isolation level
|
||||
is <= TRX_ISO_READ_COMMITTED, then this is set to FALSE */
|
||||
bool did_semi_consistent_read = false;
|
||||
mtr_t mtr;
|
||||
mtr_t mtr{trx};
|
||||
mtr.start();
|
||||
|
||||
mem_heap_t* heap = NULL;
|
||||
@@ -6000,7 +6000,7 @@ row_count_rtree_recs(
|
||||
{
|
||||
dict_index_t* index = prebuilt->index;
|
||||
dberr_t ret = DB_SUCCESS;
|
||||
mtr_t mtr;
|
||||
mtr_t mtr{prebuilt->trx};
|
||||
mem_heap_t* heap;
|
||||
dtuple_t* entry;
|
||||
dtuple_t* search_entry = prebuilt->search_tuple;
|
||||
@@ -6234,7 +6234,8 @@ dberr_t row_check_index(row_prebuilt_t *prebuilt, ulint *n_rows)
|
||||
mem_heap_t *heap= mem_heap_create(100);
|
||||
|
||||
dtuple_t *prev_entry= nullptr;
|
||||
mtr_t mtr;
|
||||
trx_t *const trx{prebuilt->trx};
|
||||
mtr_t mtr{trx};
|
||||
mtr.start();
|
||||
|
||||
dict_index_t *clust_index= dict_table_get_first_index(prebuilt->table);
|
||||
@@ -6249,17 +6250,17 @@ func_exit:
|
||||
}
|
||||
|
||||
if (const trx_id_t bulk_trx_id= index->table->bulk_trx_id)
|
||||
if (!prebuilt->trx->read_view.changes_visible(bulk_trx_id))
|
||||
if (!trx->read_view.changes_visible(bulk_trx_id))
|
||||
goto func_exit;
|
||||
|
||||
ReadView check_table_extended_view;
|
||||
ReadView &view=
|
||||
prebuilt->need_to_access_clustered &&
|
||||
!prebuilt->table->is_temporary() &&
|
||||
prebuilt->trx->isolation_level != TRX_ISO_READ_UNCOMMITTED
|
||||
? check_table_extended_view : prebuilt->trx->read_view;
|
||||
trx->isolation_level != TRX_ISO_READ_UNCOMMITTED
|
||||
? check_table_extended_view : trx->read_view;
|
||||
if (&view == &check_table_extended_view)
|
||||
check_table_extended_view.set_creator_trx_id(prebuilt->trx->id);
|
||||
check_table_extended_view.set_creator_trx_id(trx->id);
|
||||
|
||||
page_loop:
|
||||
if (&view == &check_table_extended_view)
|
||||
@@ -6301,7 +6302,7 @@ rec_loop:
|
||||
if (btr_pcur_is_after_last_in_tree(prebuilt->pcur))
|
||||
goto func_exit;
|
||||
err= btr_pcur_move_to_next_page(prebuilt->pcur, &mtr);
|
||||
if (err == DB_SUCCESS && trx_is_interrupted(prebuilt->trx))
|
||||
if (err == DB_SUCCESS && trx_is_interrupted(trx))
|
||||
err= DB_INTERRUPTED;
|
||||
if (UNIV_UNLIKELY(err != DB_SUCCESS))
|
||||
goto func_exit;
|
||||
@@ -6319,7 +6320,7 @@ rec_loop:
|
||||
{
|
||||
if (*n_rows || !index->is_instant())
|
||||
{
|
||||
push_warning_printf(prebuilt->trx->mysql_thd,
|
||||
push_warning_printf(trx->mysql_thd,
|
||||
Sql_condition::WARN_LEVEL_WARN, ER_NOT_KEYFILE,
|
||||
"InnoDB: invalid record encountered");
|
||||
prebuilt->autoinc_error= DB_INDEX_CORRUPT;
|
||||
@@ -6335,18 +6336,17 @@ rec_loop:
|
||||
}
|
||||
else if (index->is_clust())
|
||||
{
|
||||
if (prebuilt->trx->isolation_level == TRX_ISO_READ_UNCOMMITTED)
|
||||
if (trx->isolation_level == TRX_ISO_READ_UNCOMMITTED)
|
||||
goto count_or_not;
|
||||
|
||||
trx_id_t rec_trx_id= row_get_rec_trx_id(rec, index, offsets);
|
||||
|
||||
if (rec_trx_id >= prebuilt->trx->read_view.low_limit_id() &&
|
||||
if (rec_trx_id >= trx->read_view.low_limit_id() &&
|
||||
UNIV_UNLIKELY(rec_trx_id >= trx_sys.get_max_trx_id()))
|
||||
{
|
||||
invalid_trx_id:
|
||||
if (prebuilt->autoinc_error == DB_SUCCESS)
|
||||
push_warning_printf(prebuilt->trx->mysql_thd,
|
||||
Sql_condition::WARN_LEVEL_WARN,
|
||||
push_warning_printf(trx->mysql_thd, Sql_condition::WARN_LEVEL_WARN,
|
||||
ER_NOT_KEYFILE,
|
||||
"InnoDB: DB_TRX_ID=" TRX_ID_FMT
|
||||
" exceeds the system-wide maximum",
|
||||
@@ -6355,7 +6355,7 @@ rec_loop:
|
||||
goto next_rec;
|
||||
}
|
||||
|
||||
if (!prebuilt->trx->read_view.changes_visible(rec_trx_id))
|
||||
if (!trx->read_view.changes_visible(rec_trx_id))
|
||||
{
|
||||
ut_ad(srv_force_recovery < SRV_FORCE_NO_UNDO_LOG_SCAN);
|
||||
rec_t *old_vers;
|
||||
@@ -6371,7 +6371,7 @@ rec_loop:
|
||||
rec= old_vers;
|
||||
rec_trx_id= row_get_rec_trx_id(rec, index, offsets);
|
||||
|
||||
if (rec_trx_id >= prebuilt->trx->read_view.low_limit_id() &&
|
||||
if (rec_trx_id >= trx->read_view.low_limit_id() &&
|
||||
UNIV_UNLIKELY(rec_trx_id >= trx_sys.get_max_trx_id()))
|
||||
goto invalid_trx_id;
|
||||
|
||||
@@ -6392,8 +6392,7 @@ rec_loop:
|
||||
<< index->table->name << ": "
|
||||
<< rec_offsets_print(rec, offsets);
|
||||
prebuilt->autoinc_error= DB_MISSING_HISTORY;
|
||||
push_warning_printf(prebuilt->trx->mysql_thd,
|
||||
Sql_condition::WARN_LEVEL_WARN,
|
||||
push_warning_printf(trx->mysql_thd, Sql_condition::WARN_LEVEL_WARN,
|
||||
ER_NOT_KEYFILE, "InnoDB: %s", w.m_oss.str().c_str());
|
||||
}
|
||||
|
||||
@@ -6404,7 +6403,7 @@ rec_loop:
|
||||
{
|
||||
if (page_trx_id >= trx_sys.get_max_trx_id())
|
||||
goto invalid_PAGE_MAX_TRX_ID;
|
||||
if (prebuilt->trx->isolation_level == TRX_ISO_READ_UNCOMMITTED);
|
||||
if (trx->isolation_level == TRX_ISO_READ_UNCOMMITTED);
|
||||
else if (&view == &check_table_extended_view || rec_deleted ||
|
||||
!view.sees(page_trx_id))
|
||||
{
|
||||
@@ -6449,8 +6448,7 @@ rec_loop:
|
||||
w << "Clustered index record not found for index "
|
||||
<< index->name << " of table " << index->table->name
|
||||
<< ": " << rec_offsets_print(rec, offsets);
|
||||
push_warning_printf(prebuilt->trx->mysql_thd,
|
||||
Sql_condition::WARN_LEVEL_WARN,
|
||||
push_warning_printf(trx->mysql_thd, Sql_condition::WARN_LEVEL_WARN,
|
||||
ER_NOT_KEYFILE, "InnoDB: %s",
|
||||
w.m_oss.str().c_str());
|
||||
}
|
||||
@@ -6551,7 +6549,7 @@ rec_loop:
|
||||
got_extended_match= err == DB_SUCCESS;
|
||||
err= DB_SUCCESS;
|
||||
|
||||
if (!prebuilt->trx->read_view.changes_visible(rec_trx_id))
|
||||
if (!trx->read_view.changes_visible(rec_trx_id))
|
||||
/* While CHECK TABLE ... EXTENDED checks for a matching
|
||||
clustered index record version for each secondary index
|
||||
record, it must count only those records that belong to its
|
||||
@@ -6589,8 +6587,7 @@ rec_loop:
|
||||
{
|
||||
invalid_rec_trx_id:
|
||||
if (prebuilt->autoinc_error == DB_SUCCESS)
|
||||
push_warning_printf(prebuilt->trx->mysql_thd,
|
||||
Sql_condition::WARN_LEVEL_WARN,
|
||||
push_warning_printf(trx->mysql_thd, Sql_condition::WARN_LEVEL_WARN,
|
||||
ER_NOT_KEYFILE,
|
||||
"InnoDB: DB_TRX_ID=" TRX_ID_FMT
|
||||
" exceeds the system-wide maximum",
|
||||
@@ -6664,7 +6661,7 @@ rec_loop:
|
||||
clust_offsets);
|
||||
|
||||
if (UNIV_UNLIKELY(rec_trx_id >=
|
||||
prebuilt->trx->read_view.low_limit_id() &&
|
||||
trx->read_view.low_limit_id() &&
|
||||
rec_trx_id >= trx_sys.get_max_trx_id()))
|
||||
{
|
||||
mem_heap_free(vers_heap);
|
||||
@@ -6672,11 +6669,11 @@ rec_loop:
|
||||
}
|
||||
|
||||
const bool rec_visible=
|
||||
prebuilt->trx->read_view.changes_visible(rec_trx_id);
|
||||
trx->read_view.changes_visible(rec_trx_id);
|
||||
const bool clust_rec_deleted=
|
||||
rec_get_deleted_flag(clust_rec, prebuilt->table->not_redundant());
|
||||
|
||||
if (&view != &prebuilt->trx->read_view)
|
||||
if (&view != &trx->read_view)
|
||||
{
|
||||
/* It is not safe to fetch BLOBs of committed delete-marked
|
||||
records that may have been freed in purge. */
|
||||
@@ -6752,7 +6749,7 @@ rec_loop:
|
||||
ULINT_UNDEFINED, &heap);
|
||||
check_match:
|
||||
/* This clustered index record version exists in
|
||||
prebuilt->trx->read_view and is not delete-marked.
|
||||
trx->read_view and is not delete-marked.
|
||||
By design, any BLOBs in it are not allowed to be
|
||||
freed in the purge of committed transaction history. */
|
||||
err= row_check_index_match(prebuilt, clust_rec, clust_index,
|
||||
@@ -6776,7 +6773,7 @@ rec_loop:
|
||||
invalid_PAGE_MAX_TRX_ID:
|
||||
if (UNIV_LIKELY(srv_force_recovery < SRV_FORCE_NO_UNDO_LOG_SCAN))
|
||||
{
|
||||
push_warning_printf(prebuilt->trx->mysql_thd,
|
||||
push_warning_printf(trx->mysql_thd,
|
||||
Sql_condition::WARN_LEVEL_WARN, ER_NOT_KEYFILE,
|
||||
"InnoDB: Invalid PAGE_MAX_TRX_ID=%" PRIu64
|
||||
" in index '%-.200s'",
|
||||
|
||||
@@ -68,7 +68,7 @@ row_undo_ins_remove_clust_rec(
|
||||
{
|
||||
dberr_t err;
|
||||
ulint n_tries = 0;
|
||||
mtr_t mtr;
|
||||
mtr_t mtr{node->trx};
|
||||
dict_index_t* index = node->pcur.index();
|
||||
table_id_t table_id = 0;
|
||||
const bool dict_locked = node->trx->dict_operation_lock_mode;
|
||||
@@ -258,7 +258,7 @@ row_undo_ins_remove_sec_low(
|
||||
{
|
||||
btr_pcur_t pcur;
|
||||
dberr_t err = DB_SUCCESS;
|
||||
mtr_t mtr;
|
||||
mtr_t mtr{thr_get_trx(thr)};
|
||||
const bool modify_leaf = mode == BTR_MODIFY_LEAF;
|
||||
|
||||
pcur.btr_cur.page_cur.index = index;
|
||||
|
||||
@@ -245,7 +245,7 @@ row_undo_mod_clust(
|
||||
que_thr_t* thr) /*!< in: query thread */
|
||||
{
|
||||
btr_pcur_t* pcur;
|
||||
mtr_t mtr;
|
||||
mtr_t mtr{node->trx};
|
||||
dberr_t err;
|
||||
dict_index_t* index;
|
||||
|
||||
@@ -478,6 +478,7 @@ corresponds to a secondary index entry.
|
||||
@param index secondary index
|
||||
@param ientry secondary index entry
|
||||
@param mtr mini-transaction
|
||||
@param trx transaction connected to current_thd
|
||||
@return whether an accessible non-dete-marked version of rec
|
||||
corresponds to ientry */
|
||||
static bool row_undo_mod_sec_is_unsafe(const rec_t *rec, dict_index_t *index,
|
||||
@@ -525,8 +526,8 @@ static bool row_undo_mod_sec_is_unsafe(const rec_t *rec, dict_index_t *index,
|
||||
|
||||
trx_undo_prev_version_build(version,
|
||||
clust_index, clust_offsets,
|
||||
heap, &prev_version,
|
||||
mtr, TRX_UNDO_CHECK_PURGEABILITY,
|
||||
heap, &prev_version, mtr,
|
||||
TRX_UNDO_CHECK_PURGEABILITY,
|
||||
nullptr,
|
||||
dict_index_has_virtual(index)
|
||||
? &vrow : nullptr);
|
||||
@@ -626,7 +627,7 @@ row_undo_mod_del_mark_or_remove_sec_low(
|
||||
btr_pcur_t pcur;
|
||||
btr_cur_t* btr_cur;
|
||||
dberr_t err = DB_SUCCESS;
|
||||
mtr_t mtr;
|
||||
mtr_t mtr{thr->graph->trx};
|
||||
const bool modify_leaf = mode == BTR_MODIFY_LEAF;
|
||||
|
||||
row_mtr_start(&mtr, index);
|
||||
@@ -791,8 +792,8 @@ row_undo_mod_del_unmark_sec_and_undo_update(
|
||||
upd_t* update;
|
||||
dberr_t err = DB_SUCCESS;
|
||||
big_rec_t* dummy_big_rec;
|
||||
mtr_t mtr;
|
||||
trx_t* trx = thr_get_trx(thr);
|
||||
mtr_t mtr{trx};
|
||||
const ulint flags
|
||||
= BTR_KEEP_SYS_FLAG | BTR_NO_LOCKING_FLAG;
|
||||
const auto orig_mode = mode;
|
||||
|
||||
@@ -163,7 +163,7 @@ row_undo_search_clust_to_pcur(
|
||||
{
|
||||
dict_index_t* clust_index;
|
||||
bool found;
|
||||
mtr_t mtr;
|
||||
mtr_t mtr{node->trx};
|
||||
row_ext_t** ext;
|
||||
const rec_t* rec;
|
||||
mem_heap_t* heap = NULL;
|
||||
@@ -264,7 +264,7 @@ static buf_block_t* row_undo_rec_get(undo_node_t* node)
|
||||
|
||||
if (trx->pages_undone) {
|
||||
trx->pages_undone = 0;
|
||||
trx_undo_try_truncate(*trx);
|
||||
trx_undo_try_truncate(trx);
|
||||
}
|
||||
|
||||
trx_undo_t* undo = NULL;
|
||||
@@ -292,7 +292,7 @@ static buf_block_t* row_undo_rec_get(undo_node_t* node)
|
||||
}
|
||||
|
||||
if (undo == NULL) {
|
||||
trx_undo_try_truncate(*trx);
|
||||
trx_undo_try_truncate(trx);
|
||||
/* Mark any ROLLBACK TO SAVEPOINT completed, so that
|
||||
if the transaction object is committed and reused
|
||||
later, we will default to a full ROLLBACK. */
|
||||
@@ -308,7 +308,7 @@ static buf_block_t* row_undo_rec_get(undo_node_t* node)
|
||||
false, trx_sys.rseg_id(undo->rseg, !node->is_temp),
|
||||
undo->top_page_no, undo->top_offset);
|
||||
|
||||
mtr_t mtr;
|
||||
mtr_t mtr{trx};
|
||||
mtr.start();
|
||||
|
||||
buf_block_t* undo_page = buf_page_get(
|
||||
|
||||
@@ -1824,16 +1824,15 @@ row_upd_sec_index_entry(
|
||||
upd_node_t* node, /*!< in: row update node */
|
||||
que_thr_t* thr) /*!< in: query thread */
|
||||
{
|
||||
mtr_t mtr;
|
||||
btr_pcur_t pcur;
|
||||
mem_heap_t* heap;
|
||||
dtuple_t* entry;
|
||||
dict_index_t* index;
|
||||
dberr_t err = DB_SUCCESS;
|
||||
trx_t* trx = thr_get_trx(thr);
|
||||
mtr_t mtr{thr_get_trx(thr)};
|
||||
ulint flags;
|
||||
|
||||
ut_ad(trx->id != 0);
|
||||
ut_ad(mtr.trx->id != 0);
|
||||
|
||||
index = node->index;
|
||||
ut_ad(index->is_committed());
|
||||
@@ -1842,9 +1841,9 @@ row_upd_sec_index_entry(
|
||||
if index->is_committed(). */
|
||||
ut_ad(!dict_index_is_online_ddl(index));
|
||||
|
||||
const bool referenced = row_upd_index_is_referenced(index, trx);
|
||||
const bool referenced = row_upd_index_is_referenced(index, mtr.trx);
|
||||
#ifdef WITH_WSREP
|
||||
const bool foreign = wsrep_row_upd_index_is_foreign(index, trx);
|
||||
const bool foreign = wsrep_row_upd_index_is_foreign(index, mtr.trx);
|
||||
#endif /* WITH_WSREP */
|
||||
|
||||
heap = mem_heap_create(1024);
|
||||
@@ -1855,7 +1854,7 @@ row_upd_sec_index_entry(
|
||||
|
||||
log_free_check();
|
||||
|
||||
DEBUG_SYNC_C_IF_THD(trx->mysql_thd,
|
||||
DEBUG_SYNC_C_IF_THD(mtr.trx->mysql_thd,
|
||||
"before_row_upd_sec_index_entry");
|
||||
|
||||
mtr.start();
|
||||
@@ -1900,7 +1899,7 @@ not_found:
|
||||
#ifdef UNIV_DEBUG
|
||||
mtr_commit(&mtr);
|
||||
mtr_start(&mtr);
|
||||
ut_ad(btr_validate_index(index, 0) == DB_SUCCESS);
|
||||
ut_ad(btr_validate_index(index, mtr.trx) == DB_SUCCESS);
|
||||
ut_ad(0);
|
||||
#endif /* UNIV_DEBUG */
|
||||
} else {
|
||||
@@ -1926,8 +1925,8 @@ found:
|
||||
&mtr);
|
||||
#ifdef WITH_WSREP
|
||||
if (!referenced && foreign
|
||||
&& wsrep_must_process_fk(node, trx)
|
||||
&& !wsrep_thd_is_BF(trx->mysql_thd, FALSE)) {
|
||||
&& wsrep_must_process_fk(node, mtr.trx)
|
||||
&& !wsrep_thd_is_BF(mtr.trx->mysql_thd, FALSE)) {
|
||||
|
||||
rec_offs* offsets = rec_get_offsets(
|
||||
rec, index, NULL, index->n_core_fields,
|
||||
@@ -1948,13 +1947,13 @@ found:
|
||||
WSREP_DEBUG("Foreign key check fail: "
|
||||
"%s on table %s index %s query %s",
|
||||
ut_strerr(err), index->name(), index->table->name.m_name,
|
||||
wsrep_thd_query(trx->mysql_thd));
|
||||
wsrep_thd_query(mtr.trx->mysql_thd));
|
||||
break;
|
||||
default:
|
||||
WSREP_ERROR("Foreign key check fail: "
|
||||
"%s on table %s index %s query %s",
|
||||
ut_strerr(err), index->name(), index->table->name.m_name,
|
||||
wsrep_thd_query(trx->mysql_thd));
|
||||
wsrep_thd_query(mtr.trx->mysql_thd));
|
||||
break;
|
||||
}
|
||||
}
|
||||
@@ -1992,7 +1991,7 @@ close:
|
||||
|
||||
mem_heap_empty(heap);
|
||||
|
||||
DEBUG_SYNC_C_IF_THD(trx->mysql_thd,
|
||||
DEBUG_SYNC_C_IF_THD(mtr.trx->mysql_thd,
|
||||
"before_row_upd_sec_new_index_entry");
|
||||
|
||||
/* Build a new index entry */
|
||||
@@ -2539,13 +2538,13 @@ row_upd_clust_step(
|
||||
dict_index_t* index;
|
||||
btr_pcur_t* pcur;
|
||||
dberr_t err;
|
||||
mtr_t mtr;
|
||||
rec_t* rec;
|
||||
mem_heap_t* heap = NULL;
|
||||
rec_offs offsets_[REC_OFFS_NORMAL_SIZE];
|
||||
rec_offs* offsets;
|
||||
ulint flags;
|
||||
trx_t* trx = thr_get_trx(thr);
|
||||
mtr_t mtr{trx};
|
||||
|
||||
rec_offs_init(offsets_);
|
||||
|
||||
|
||||
@@ -69,7 +69,6 @@ row_vers_non_virtual_fields_equal(
|
||||
|
||||
/** Determine if an active transaction has inserted or modified a secondary
|
||||
index record.
|
||||
@param[in,out] caller_trx trx of current thread
|
||||
@param[in] clust_rec clustered index record
|
||||
@param[in] clust_index clustered index
|
||||
@param[in] rec secondary index record
|
||||
@@ -82,7 +81,6 @@ acquiring trx->mutex, and trx->release_reference() must be invoked
|
||||
UNIV_INLINE
|
||||
trx_t*
|
||||
row_vers_impl_x_locked_low(
|
||||
trx_t* caller_trx,
|
||||
const rec_t* clust_rec,
|
||||
dict_index_t* clust_index,
|
||||
const rec_t* rec,
|
||||
@@ -123,7 +121,7 @@ row_vers_impl_x_locked_low(
|
||||
ULINT_UNDEFINED, &heap);
|
||||
|
||||
trx_id = row_get_rec_trx_id(clust_rec, clust_index, clust_offsets);
|
||||
if (trx_id <= caller_trx->max_inactive_id) {
|
||||
if (trx_id <= mtr->trx->max_inactive_id) {
|
||||
/* The transaction history was already purged. */
|
||||
mem_heap_free(heap);
|
||||
DBUG_RETURN(0);
|
||||
@@ -133,11 +131,11 @@ row_vers_impl_x_locked_low(
|
||||
|
||||
trx_t* trx;
|
||||
|
||||
if (trx_id == caller_trx->id) {
|
||||
trx = caller_trx;
|
||||
if (trx_id == mtr->trx->id) {
|
||||
trx = mtr->trx;
|
||||
trx->reference();
|
||||
} else {
|
||||
trx = trx_sys.find(caller_trx, trx_id);
|
||||
trx = trx_sys.find(mtr->trx, trx_id);
|
||||
if (trx == 0) {
|
||||
/* The transaction that modified or inserted
|
||||
clust_rec is no longer active, or it is
|
||||
@@ -397,7 +395,7 @@ row_vers_impl_x_locked(
|
||||
dict_index_t* index,
|
||||
const rec_offs* offsets)
|
||||
{
|
||||
mtr_t mtr;
|
||||
mtr_t mtr{caller_trx};
|
||||
trx_t* trx;
|
||||
const rec_t* clust_rec;
|
||||
dict_index_t* clust_index;
|
||||
@@ -437,7 +435,7 @@ row_vers_impl_x_locked(
|
||||
trx = 0;
|
||||
} else {
|
||||
trx = row_vers_impl_x_locked_low(
|
||||
caller_trx, clust_rec, clust_index, rec, index,
|
||||
clust_rec, clust_index, rec, index,
|
||||
offsets, &mtr);
|
||||
|
||||
ut_ad(trx == 0 || trx->is_referenced());
|
||||
@@ -822,7 +820,6 @@ which should be seen by a semi-consistent read. */
|
||||
void
|
||||
row_vers_build_for_semi_consistent_read(
|
||||
/*====================================*/
|
||||
trx_t* caller_trx,/*!<in/out: trx of current thread */
|
||||
const rec_t* rec, /*!< in: record in a clustered index; the
|
||||
caller must have a latch on the page; this
|
||||
latch locks the top of the stack of versions
|
||||
@@ -867,7 +864,7 @@ row_vers_build_for_semi_consistent_read(
|
||||
rec_trx_id = version_trx_id;
|
||||
}
|
||||
|
||||
if (!trx_sys.is_registered(caller_trx, version_trx_id)) {
|
||||
if (!trx_sys.is_registered(mtr->trx, version_trx_id)) {
|
||||
committed_version_trx:
|
||||
/* We found a version that belongs to a
|
||||
committed transaction: return it. */
|
||||
|
||||
@@ -65,6 +65,7 @@ Created 10/8/1995 Heikki Tuuri
|
||||
#include "fil0crypt.h"
|
||||
#include "fil0pagecompress.h"
|
||||
#include "trx0types.h"
|
||||
#include "row0purge.h"
|
||||
#include <list>
|
||||
#include "log.h"
|
||||
|
||||
@@ -452,7 +453,7 @@ struct purge_coordinator_state
|
||||
size_t history_size;
|
||||
Atomic_counter<int> m_running;
|
||||
public:
|
||||
inline void do_purge();
|
||||
inline void do_purge(trx_t *trx);
|
||||
};
|
||||
|
||||
static purge_coordinator_state purge_state;
|
||||
@@ -866,9 +867,6 @@ srv_export_innodb_status(void)
|
||||
export_vars.innodb_data_written = srv_stats.data_written
|
||||
+ (dblwr << srv_page_size_shift);
|
||||
|
||||
export_vars.innodb_buffer_pool_read_requests
|
||||
= buf_pool.stat.n_page_gets;
|
||||
|
||||
mysql_mutex_lock(&buf_pool.mutex);
|
||||
export_vars.innodb_buffer_pool_bytes_data =
|
||||
buf_pool.stat.LRU_bytes
|
||||
@@ -1344,7 +1342,7 @@ static bool srv_purge_should_exit(size_t old_history_size)
|
||||
/*********************************************************************//**
|
||||
Fetch and execute a task from the work queue.
|
||||
@return true if a task was executed */
|
||||
static bool srv_task_execute()
|
||||
static bool srv_task_execute(THD *thd)
|
||||
{
|
||||
ut_ad(!srv_read_only_mode);
|
||||
ut_ad(srv_force_recovery < SRV_FORCE_NO_BACKGROUND);
|
||||
@@ -1355,6 +1353,7 @@ static bool srv_task_execute()
|
||||
ut_a(que_node_get_type(thr->child) == QUE_NODE_PURGE);
|
||||
UT_LIST_REMOVE(srv_sys.tasks, thr);
|
||||
mysql_mutex_unlock(&srv_sys.tasks_mutex);
|
||||
static_cast<purge_node_t*>(thr->child)->trx = thd_to_trx(thd);
|
||||
que_run_threads(thr);
|
||||
return true;
|
||||
}
|
||||
@@ -1364,8 +1363,6 @@ static bool srv_task_execute()
|
||||
return false;
|
||||
}
|
||||
|
||||
static void purge_create_background_thds(int );
|
||||
|
||||
/** Flag which is set, whenever innodb_purge_threads changes. */
|
||||
static Atomic_relaxed<bool> srv_purge_thread_count_changed;
|
||||
|
||||
@@ -1379,7 +1376,7 @@ void srv_update_purge_thread_count(uint n)
|
||||
srv_purge_thread_count_changed = true;
|
||||
}
|
||||
|
||||
inline void purge_coordinator_state::do_purge()
|
||||
inline void purge_coordinator_state::do_purge(trx_t *trx)
|
||||
{
|
||||
ut_ad(!srv_read_only_mode);
|
||||
|
||||
@@ -1421,7 +1418,7 @@ inline void purge_coordinator_state::do_purge()
|
||||
break;
|
||||
}
|
||||
|
||||
ulint n_pages_handled= trx_purge(n_threads, history_size);
|
||||
ulint n_pages_handled= trx_purge(trx, n_threads, history_size);
|
||||
if (!trx_sys.history_exists())
|
||||
goto no_history;
|
||||
if (purge_sys.truncating_tablespace() ||
|
||||
@@ -1474,19 +1471,24 @@ static THD *acquire_thd(void **ctx)
|
||||
return thd;
|
||||
}
|
||||
|
||||
static void release_thd(THD *thd, void *ctx)
|
||||
extern struct handlerton *innodb_hton_ptr;
|
||||
|
||||
static void release_thd(trx_t *trx, void *ctx)
|
||||
{
|
||||
thd_detach_thd(ctx);
|
||||
std::unique_lock<std::mutex> lk(purge_thd_mutex);
|
||||
purge_thds.push_back(thd);
|
||||
lk.unlock();
|
||||
set_current_thd(0);
|
||||
THD *const thd= trx->mysql_thd;
|
||||
trx->free();
|
||||
thd_set_ha_data(thd, innodb_hton_ptr, nullptr);
|
||||
thd_detach_thd(ctx);
|
||||
std::unique_lock<std::mutex> lk(purge_thd_mutex);
|
||||
purge_thds.push_back(thd);
|
||||
lk.unlock();
|
||||
set_current_thd(nullptr);
|
||||
}
|
||||
|
||||
void srv_purge_worker_task_low()
|
||||
{
|
||||
ut_ad(current_thd);
|
||||
while (srv_task_execute())
|
||||
THD *const thd{current_thd};
|
||||
while (srv_task_execute(thd))
|
||||
ut_ad(purge_sys.running());
|
||||
}
|
||||
|
||||
@@ -1497,16 +1499,22 @@ static void purge_worker_callback(void*)
|
||||
ut_ad(srv_force_recovery < SRV_FORCE_NO_BACKGROUND);
|
||||
void *ctx;
|
||||
THD *thd= acquire_thd(&ctx);
|
||||
trx_t *trx= trx_create();
|
||||
trx->mysql_thd= thd;
|
||||
thd_set_ha_data(thd, innodb_hton_ptr, trx);
|
||||
srv_purge_worker_task_low();
|
||||
release_thd(thd,ctx);
|
||||
release_thd(trx, ctx);
|
||||
}
|
||||
|
||||
static void purge_coordinator_callback(void*)
|
||||
{
|
||||
void *ctx;
|
||||
THD *thd= acquire_thd(&ctx);
|
||||
purge_state.do_purge();
|
||||
release_thd(thd, ctx);
|
||||
trx_t *trx= trx_create();
|
||||
trx->mysql_thd= thd;
|
||||
thd_set_ha_data(thd, innodb_hton_ptr, trx);
|
||||
purge_state.do_purge(trx);
|
||||
release_thd(trx, ctx);
|
||||
}
|
||||
|
||||
void srv_init_purge_tasks()
|
||||
|
||||
@@ -433,7 +433,7 @@ static dberr_t srv_undo_delete_old_tablespaces()
|
||||
/** Recreate the undo log tablespaces */
|
||||
ATTRIBUTE_COLD static dberr_t srv_undo_tablespaces_reinit()
|
||||
{
|
||||
mtr_t mtr;
|
||||
mtr_t mtr{nullptr};
|
||||
dberr_t err;
|
||||
buf_block_t *first_rseg_hdr;
|
||||
uint32_t latest_space_id;
|
||||
@@ -626,7 +626,7 @@ static dberr_t srv_undo_tablespaces_reinitialize()
|
||||
static uint32_t trx_rseg_get_n_undo_tablespaces()
|
||||
{
|
||||
std::set<uint32_t> space_ids;
|
||||
mtr_t mtr;
|
||||
mtr_t mtr{nullptr};
|
||||
mtr.start();
|
||||
|
||||
if (const buf_block_t *sys_header=
|
||||
@@ -979,7 +979,7 @@ srv_open_tmp_tablespace(bool create_new_db)
|
||||
ib::error() << "Unable to create the shared innodb_temporary";
|
||||
} else if (fil_system.temp_space->open(true)) {
|
||||
/* Initialize the header page */
|
||||
mtr_t mtr;
|
||||
mtr_t mtr{nullptr};
|
||||
mtr.start();
|
||||
mtr.set_log_mode(MTR_LOG_NO_REDO);
|
||||
err = fsp_header_init(fil_system.temp_space,
|
||||
@@ -1255,13 +1255,52 @@ inline lsn_t log_t::init_lsn() noexcept
|
||||
|
||||
PRAGMA_DISABLE_CHECK_STACK_FRAME
|
||||
|
||||
/** Load the dictionary tables */
|
||||
static dberr_t srv_load_tables(bool must_upgrade_ibuf) noexcept
|
||||
{
|
||||
mem_heap_t *heap= mem_heap_create(1000);
|
||||
mtr_t mtr{nullptr};
|
||||
dict_sys.lock(SRW_LOCK_CALL);
|
||||
dberr_t err = dict_load_indexes(&mtr, dict_sys.sys_tables, false, heap,
|
||||
DICT_ERR_IGNORE_NONE);
|
||||
mem_heap_empty(heap);
|
||||
if ((err == DB_SUCCESS || srv_force_recovery >= SRV_FORCE_NO_DDL_UNDO) &&
|
||||
UNIV_UNLIKELY(must_upgrade_ibuf))
|
||||
{
|
||||
dict_sys.unlock();
|
||||
dict_load_tablespaces(nullptr, true);
|
||||
err= ibuf_upgrade();
|
||||
dict_sys.lock(SRW_LOCK_CALL);
|
||||
}
|
||||
if (err == DB_SUCCESS || srv_force_recovery >= SRV_FORCE_NO_DDL_UNDO)
|
||||
{
|
||||
err = dict_load_indexes(&mtr, dict_sys.sys_columns, false, heap,
|
||||
DICT_ERR_IGNORE_NONE);
|
||||
mem_heap_empty(heap);
|
||||
}
|
||||
if (err == DB_SUCCESS || srv_force_recovery >= SRV_FORCE_NO_DDL_UNDO)
|
||||
{
|
||||
err = dict_load_indexes(&mtr, dict_sys.sys_indexes, false, heap,
|
||||
DICT_ERR_IGNORE_NONE);
|
||||
mem_heap_empty(heap);
|
||||
}
|
||||
if (err == DB_SUCCESS || srv_force_recovery >= SRV_FORCE_NO_DDL_UNDO) {
|
||||
err = dict_load_indexes(&mtr, dict_sys.sys_fields, false, heap,
|
||||
DICT_ERR_IGNORE_NONE);
|
||||
}
|
||||
mem_heap_free(heap);
|
||||
dict_sys.unlock();
|
||||
dict_sys.load_sys_tables();
|
||||
return err;
|
||||
}
|
||||
|
||||
/** Start InnoDB.
|
||||
@param[in] create_new_db whether to create a new database
|
||||
@return DB_SUCCESS or error code */
|
||||
dberr_t srv_start(bool create_new_db)
|
||||
{
|
||||
dberr_t err = DB_SUCCESS;
|
||||
mtr_t mtr;
|
||||
mtr_t mtr{nullptr};
|
||||
|
||||
ut_ad(srv_operation <= SRV_OPERATION_RESTORE_EXPORT
|
||||
|| srv_operation == SRV_OPERATION_RESTORE
|
||||
@@ -1474,7 +1513,6 @@ dberr_t srv_start(bool create_new_db)
|
||||
|
||||
|
||||
if (err == DB_SUCCESS) {
|
||||
mtr_t mtr;
|
||||
mtr.start();
|
||||
err= srv_undo_tablespaces_init(create_new_db, &mtr);
|
||||
mtr.commit();
|
||||
@@ -1636,25 +1674,11 @@ dberr_t srv_start(bool create_new_db)
|
||||
DBUG_PRINT("ib_log", ("apply completed"));
|
||||
|
||||
if (srv_operation != SRV_OPERATION_RESTORE) {
|
||||
dict_sys.lock(SRW_LOCK_CALL);
|
||||
dict_load_sys_table(dict_sys.sys_tables);
|
||||
dict_sys.unlock();
|
||||
|
||||
if (UNIV_UNLIKELY(must_upgrade_ibuf)) {
|
||||
dict_load_tablespaces(nullptr, true);
|
||||
err = ibuf_upgrade();
|
||||
if (err != DB_SUCCESS) {
|
||||
return srv_init_abort(err);
|
||||
}
|
||||
err = srv_load_tables(must_upgrade_ibuf);
|
||||
if (err != DB_SUCCESS) {
|
||||
return srv_init_abort(err);
|
||||
}
|
||||
|
||||
dict_sys.lock(SRW_LOCK_CALL);
|
||||
dict_load_sys_table(dict_sys.sys_columns);
|
||||
dict_load_sys_table(dict_sys.sys_indexes);
|
||||
dict_load_sys_table(dict_sys.sys_fields);
|
||||
dict_sys.unlock();
|
||||
dict_sys.load_sys_tables();
|
||||
|
||||
err = trx_lists_init_at_db_start();
|
||||
if (err != DB_SUCCESS) {
|
||||
return srv_init_abort(err);
|
||||
|
||||
@@ -580,7 +580,7 @@ fill_lock_data(
|
||||
return(*lock_data != NULL);
|
||||
}
|
||||
|
||||
mtr_t mtr;
|
||||
mtr_t mtr{nullptr};
|
||||
|
||||
const buf_block_t* block;
|
||||
const page_t* page;
|
||||
|
||||
@@ -338,7 +338,7 @@ void purge_sys_t::rseg_enable(trx_rseg_t &rseg)
|
||||
inline dberr_t purge_sys_t::iterator::free_history_rseg(trx_rseg_t &rseg) const
|
||||
{
|
||||
fil_addr_t hdr_addr;
|
||||
mtr_t mtr;
|
||||
mtr_t mtr{nullptr};
|
||||
bool freed= false;
|
||||
uint32_t rseg_ref= 0;
|
||||
const auto last_boffset= srv_page_size - TRX_UNDO_LOG_OLD_HDR_SIZE;
|
||||
@@ -701,7 +701,7 @@ not_free:
|
||||
|
||||
log_free_check();
|
||||
|
||||
mtr_t mtr;
|
||||
mtr_t mtr{nullptr};
|
||||
mtr.start();
|
||||
mtr.x_lock_space(space);
|
||||
/* Associate the undo tablespace with mtr.
|
||||
@@ -764,7 +764,7 @@ not_free:
|
||||
}
|
||||
}
|
||||
|
||||
buf_block_t *purge_sys_t::get_page(page_id_t id)
|
||||
buf_block_t *purge_sys_t::get_page(page_id_t id, trx_t *trx)
|
||||
{
|
||||
ut_ad(!recv_sys.recovery_on);
|
||||
|
||||
@@ -773,7 +773,7 @@ buf_block_t *purge_sys_t::get_page(page_id_t id)
|
||||
|
||||
if (!undo_page)
|
||||
{
|
||||
undo_page= buf_pool.page_fix(id); // batch_cleanup() will unfix()
|
||||
undo_page= buf_pool.page_fix(id, trx); // batch_cleanup() will unfix()
|
||||
if (!undo_page)
|
||||
pages.erase(id);
|
||||
else
|
||||
@@ -783,7 +783,7 @@ buf_block_t *purge_sys_t::get_page(page_id_t id)
|
||||
return undo_page;
|
||||
}
|
||||
|
||||
bool purge_sys_t::rseg_get_next_history_log()
bool purge_sys_t::rseg_get_next_history_log(trx_t *trx) noexcept
{
  fil_addr_t prev_log_addr;

@@ -795,7 +795,7 @@ bool purge_sys_t::rseg_get_next_history_log()
  next_stored= false;

  if (buf_block_t *undo_page=
      get_page(page_id_t(rseg->space->id, rseg->last_page_no)))
      get_page(page_id_t(rseg->space->id, rseg->last_page_no), trx))
  {
    const byte *log_hdr= undo_page->page.frame + rseg->last_offset();
    prev_log_addr= flst_get_prev_addr(log_hdr + TRX_UNDO_HISTORY_NODE);
@@ -816,7 +816,7 @@ bool purge_sys_t::rseg_get_next_history_log()
  /* Read the previous log header. */
  trx_id_t trx_no= 0;
  if (const buf_block_t* undo_page=
      get_page(page_id_t(rseg->space->id, prev_log_addr.page)))
      get_page(page_id_t(rseg->space->id, prev_log_addr.page), trx))
  {
    const byte *log_hdr= undo_page->page.frame + prev_log_addr.boffset;
    trx_no= mach_read_from_8(log_hdr + TRX_UNDO_TRX_NO);
@@ -839,13 +839,14 @@ bool purge_sys_t::rseg_get_next_history_log()
  }

  rseg->latch.wr_unlock();
  return choose_next_log();
  return choose_next_log(trx);
}

/** Position the purge sys "iterator" on the undo record to use for purging.
@param trx  transaction attached to current_thd
@retval false when nothing is to be purged
@retval true  when purge_sys.rseg->latch was locked */
bool purge_sys_t::choose_next_log()
bool purge_sys_t::choose_next_log(trx_t *trx) noexcept
{
  ut_ad(!next_stored);

@@ -889,7 +890,7 @@ bool purge_sys_t::choose_next_log()
  else
  {
    page_id_t id{rseg->space->id, hdr_page_no};
    buf_block_t *b= get_page(id);
    buf_block_t *b= get_page(id, trx);
    if (!b)
      goto purge_nothing;
    const trx_undo_rec_t *undo_rec=
@@ -904,7 +905,7 @@ bool purge_sys_t::choose_next_log()
      if (next == FIL_NULL)
        goto purge_nothing;
      id.set_page_no(next);
      b= get_page(id);
      b= get_page(id, trx);
      if (!b)
        goto purge_nothing;
      undo_rec=
@@ -925,11 +926,14 @@ bool purge_sys_t::choose_next_log()

/**
Get the next record to purge and update the info in the purge system.
@param trx       transaction attached to current_thd
@param roll_ptr  undo log pointer to the record
@return buffer-fixed reference to undo log record
@retval {nullptr,1} if the whole undo log can skipped in purge
@retval {nullptr,0} if nothing is left, or on corruption */
inline trx_purge_rec_t purge_sys_t::get_next_rec(roll_ptr_t roll_ptr)
inline
trx_purge_rec_t purge_sys_t::get_next_rec(trx_t *trx, roll_ptr_t roll_ptr)
  noexcept
{
  ut_ad(next_stored);
  ut_ad(tail.trx_no < low_limit_no());
@@ -939,7 +943,7 @@ inline trx_purge_rec_t purge_sys_t::get_next_rec(roll_ptr_t roll_ptr)
  {
    /* It is the dummy undo log record, which means that there is no need to
    purge this undo log. Look for the next undo log and record to purge */
    if (rseg_get_next_history_log())
    if (rseg_get_next_history_log(trx))
      rseg->latch.wr_unlock();
    return {nullptr, 1};
  }
@@ -948,7 +952,7 @@ inline trx_purge_rec_t purge_sys_t::get_next_rec(roll_ptr_t roll_ptr)

  page_id_t page_id{rseg->space->id, page_no};
  bool locked= true;
  buf_block_t *b= get_page(page_id);
  buf_block_t *b= get_page(page_id, trx);
  if (UNIV_UNLIKELY(!b))
  {
    if (locked)
@@ -974,7 +978,7 @@ inline trx_purge_rec_t purge_sys_t::get_next_rec(roll_ptr_t roll_ptr)
  if (next != FIL_NULL)
  {
    page_id.set_page_no(next);
    if (buf_block_t *next_page= get_page(page_id))
    if (buf_block_t *next_page= get_page(page_id, trx))
    {
      rec2= trx_undo_page_get_first_rec(next_page, hdr_page_no, hdr_offset);
      if (rec2)
@@ -991,7 +995,7 @@ inline trx_purge_rec_t purge_sys_t::get_next_rec(roll_ptr_t roll_ptr)
  {
  got_no_rec:
    /* Look for the next undo log and record to purge */
    locked= rseg_get_next_history_log();
    locked= rseg_get_next_history_log(trx);
  }

  if (locked)
@@ -1000,13 +1004,13 @@ inline trx_purge_rec_t purge_sys_t::get_next_rec(roll_ptr_t roll_ptr)
  return {b->page.frame + uint16_t(roll_ptr), roll_ptr};
}

inline trx_purge_rec_t purge_sys_t::fetch_next_rec()
inline trx_purge_rec_t purge_sys_t::fetch_next_rec(trx_t *trx) noexcept
{
  roll_ptr_t roll_ptr;

  if (!next_stored)
  {
    bool locked= choose_next_log();
    bool locked= choose_next_log(trx);
    ut_ad(locked == next_stored);
    if (!locked)
      goto got_nothing;
@@ -1031,7 +1035,7 @@ inline trx_purge_rec_t purge_sys_t::fetch_next_rec()
  }

  /* The following will advance the purge iterator. */
  return get_next_rec(roll_ptr);
  return get_next_rec(trx, roll_ptr);
}
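The mechanical part of these hunks is threading a single trx_t *trx parameter from fetch_next_rec() down through choose_next_log(), rseg_get_next_history_log() and get_page() into buf_pool.page_fix(), so that every undo page fixed during a purge batch is charged to the coordinator transaction. A contrived sketch of that call-chain threading (all names and types here are illustrative only, not the real purge code):

#include <cstddef>
#include <iostream>

struct trx_t { size_t pages_accessed= 0; };

static void page_fix(int /*page_no*/, trx_t *trx)
{ if (trx) ++trx->pages_accessed; }           // attribute the page access

static void get_page(int page_no, trx_t *trx) { page_fix(page_no, trx); }
static void choose_next_log(trx_t *trx) { get_page(1, trx); }
static void fetch_next_rec(trx_t *trx) { choose_next_log(trx); get_page(2, trx); }

int main()
{
  trx_t purge_coordinator_trx;                // hypothetical purge trx
  fetch_next_rec(&purge_coordinator_trx);     // one pointer flows all the way down
  std::cout << purge_coordinator_trx.pages_accessed << '\n';   // prints 2
}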
/** Close all tables that were opened in a purge batch for a worker.
@@ -1198,25 +1202,23 @@ dict_table_t *purge_sys_t::close_and_reopen(table_id_t id, THD *thd,
}

/** Run a purge batch.
@param n_purge_threads  number of purge threads
@param thd              purge coordinator thread handle
@param n_work_items     number of work items (currently tables) to process
@return new purge_sys.head */
static purge_sys_t::iterator trx_purge_attach_undo_recs(THD *thd,
                                                        ulint *n_work_items)
static purge_sys_t::iterator
trx_purge_attach_undo_recs(trx_t *trx, ulint *n_work_items) noexcept
{
  que_thr_t *thr;
  que_thr_t *thr= nullptr;
  purge_sys_t::iterator head= purge_sys.tail;

  /* Fetch and parse the UNDO records. The UNDO records are added
  to a per purge node vector. */
  thr= nullptr;

  std::unordered_map<table_id_t, purge_node_t *>
    table_id_map(TRX_PURGE_TABLE_BUCKETS);
  purge_sys.m_active= true;

  while (UNIV_LIKELY(srv_undo_sources) || !srv_fast_shutdown)
  for (THD *const thd{trx->mysql_thd};
       UNIV_LIKELY(srv_undo_sources) || !srv_fast_shutdown; )
  {
    /* Track the max {trx_id, undo_no} for truncating the
    UNDO logs once we have purged the records. */
@@ -1225,7 +1227,7 @@ static purge_sys_t::iterator trx_purge_attach_undo_recs(THD *thd,
    head= purge_sys.tail;

    /* Fetch the next record, and advance the purge_sys.tail. */
    trx_purge_rec_t purge_rec= purge_sys.fetch_next_rec();
    trx_purge_rec_t purge_rec= purge_sys.fetch_next_rec(trx);

    if (!purge_rec.undo_rec)
    {
@@ -1341,10 +1343,11 @@ void purge_sys_t::batch_cleanup(const purge_sys_t::iterator &head)

/**
Run a purge batch.
@param trx           dummy transaction associated with the purge coordinator
@param n_tasks       number of purge tasks to submit to the queue
@param history_size  trx_sys.history_size()
@return number of undo log pages handled in the batch */
ulint trx_purge(ulint n_tasks, ulint history_size)
ulint trx_purge(trx_t *trx, ulint n_tasks, ulint history_size) noexcept
{
  ut_ad(n_tasks > 0);

@@ -1352,11 +1355,10 @@ ulint trx_purge(ulint n_tasks, ulint history_size)

  ut_d(if (srv_purge_view_update_only_debug) return 0);

  THD *const thd= current_thd;

  /* Fetch the UNDO recs that need to be purged. */
  ulint n_work= 0;
  const purge_sys_t::iterator head= trx_purge_attach_undo_recs(thd, &n_work);
  THD *const thd{trx->mysql_thd};
  const purge_sys_t::iterator head= trx_purge_attach_undo_recs(trx, &n_work);
  const size_t n_pages= purge_sys.n_pages_handled();

  {
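Besides the trx parameter, the attach loop itself is rewritten from a while-loop that looked up the THD on every pass into a for-loop whose init-statement binds the loop-invariant THD once from trx->mysql_thd. A throwaway illustration of that loop rewrite (toy types, nothing from the real purge code):

#include <vector>
#include <iostream>

struct Thd {};
struct Trx { Thd *mysql_thd; };

static int process(Thd *, int item) { return item; }

int main()
{
  Thd thd;
  Trx trx{&thd};
  std::vector<int> work{1, 2, 3};
  int total= 0;
  // Equivalent of replacing "while (cond) { ... look up THD ... }" with a
  // for-loop that binds the loop-invariant THD once in its init-statement:
  for (Thd *const t{trx.mysql_thd}; !work.empty(); )
  {
    total+= process(t, work.back());
    work.pop_back();
  }
  std::cout << total << '\n';    // prints 6
}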
@@ -39,7 +39,6 @@ Created 3/26/1996 Heikki Tuuri
#include "row0row.h"
#include "row0mysql.h"
#include "row0ins.h"
#include "mariadb_stats.h"

/** The search tuple corresponding to TRX_UNDO_INSERT_METADATA. */
const dtuple_t trx_undo_metadata = {
@@ -401,7 +400,6 @@ static
uint16_t
trx_undo_page_report_insert(
	buf_block_t*	undo_block,
	trx_t*		trx,
	dict_index_t*	index,
	const dtuple_t*	clust_entry,
	mtr_t*		mtr,
@@ -431,7 +429,7 @@ trx_undo_page_report_insert(

	/* Store first some general parameters to the undo log */
	*ptr++ = TRX_UNDO_INSERT_REC;
	ptr += mach_u64_write_much_compressed(ptr, trx->undo_no);
	ptr += mach_u64_write_much_compressed(ptr, mtr->trx->undo_no);
	ptr += mach_u64_write_much_compressed(ptr, index->table->id);

	if (write_empty) {
@@ -785,7 +783,6 @@ uint16_t
trx_undo_page_report_modify(
/*========================*/
	buf_block_t*	undo_block,	/*!< in: undo log page */
	trx_t*		trx,		/*!< in: transaction */
	dict_index_t*	index,		/*!< in: clustered index where update or
					delete marking is done */
	const rec_t*	rec,		/*!< in: clustered index record which
@@ -833,12 +830,16 @@ trx_undo_page_report_modify(
	byte*		type_cmpl_ptr;
	ulint		i;
	trx_id_t	trx_id;
	ibool		ignore_prefix = FALSE;
	bool		ignore_prefix = false;
	byte		ext_buf[REC_VERSION_56_MAX_INDEX_COL_LEN
				+ BTR_EXTERN_FIELD_REF_SIZE];
	bool		first_v_col = true;

	/* Store first some general parameters to the undo log */
	field = rec_get_nth_field(rec, offsets, index->db_trx_id(), &flen);
	ut_ad(flen == DATA_TRX_ID_LEN);

	trx_id = trx_read_trx_id(field);

	if (!update) {
		ut_ad(!rec_is_delete_marked(rec, dict_table_is_comp(table)));
@@ -846,14 +847,15 @@ trx_undo_page_report_modify(
	} else if (rec_is_delete_marked(rec, dict_table_is_comp(table))) {
		/* In delete-marked records, DB_TRX_ID must
		always refer to an existing update_undo log record. */
		ut_ad(row_get_rec_trx_id(rec, index, offsets));
		ut_ad(trx_id);

		type_cmpl = TRX_UNDO_UPD_DEL_REC;

		/* We are about to update a delete marked record.
		We don't typically need the prefix in this case unless
		We don't typically need a BLOB prefix in this case unless
		the delete marking is done by the same transaction
		(which we check below). */
		ignore_prefix = TRUE;
		ignore_prefix = trx_id != mtr->trx->id;
	} else {
		type_cmpl = TRX_UNDO_UPD_EXIST_REC;
	}
@@ -862,7 +864,7 @@ trx_undo_page_report_modify(
	type_cmpl_ptr = ptr;

	*ptr++ = (byte) type_cmpl;
	ptr += mach_u64_write_much_compressed(ptr, trx->undo_no);
	ptr += mach_u64_write_much_compressed(ptr, mtr->trx->undo_no);

	ptr += mach_u64_write_much_compressed(ptr, table->id);

@@ -872,18 +874,6 @@ trx_undo_page_report_modify(
	*ptr++ = (byte) rec_get_info_bits(rec, dict_table_is_comp(table));

	/* Store the values of the system columns */
	field = rec_get_nth_field(rec, offsets, index->db_trx_id(), &flen);
	ut_ad(flen == DATA_TRX_ID_LEN);

	trx_id = trx_read_trx_id(field);

	/* If it is an update of a delete marked record, then we are
	allowed to ignore blob prefixes if the delete marking was done
	by some other trx as it must have committed by now for us to
	allow an over-write. */
	if (trx_id == trx->id) {
		ignore_prefix = false;
	}
	ptr += mach_u64_write_compressed(ptr, trx_id);

	field = rec_get_nth_field(rec, offsets, index->db_roll_ptr(), &flen);
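The ignore_prefix change folds two steps into one: previously the flag was set to TRUE in the delete-marked branch and cleared again further down once DB_TRX_ID had been read; now DB_TRX_ID is read up front and the flag is computed directly as trx_id != mtr->trx->id, which is what allows the second read of the system columns to be deleted. A throwaway equivalence check of that refactoring (none of this is InnoDB code):

#include <cassert>
#include <cstdint>

static bool old_way(uint64_t rec_trx_id, uint64_t my_trx_id)
{
  bool ignore_prefix= true;            // set unconditionally first...
  if (rec_trx_id == my_trx_id)
    ignore_prefix= false;              // ...cleared again further down
  return ignore_prefix;
}

static bool new_way(uint64_t rec_trx_id, uint64_t my_trx_id)
{
  return rec_trx_id != my_trx_id;      // computed once at the first site
}

int main()
{
  for (uint64_t rec : {1u, 7u})
    for (uint64_t me : {1u, 7u})
      assert(old_way(rec, me) == new_way(rec, me));   // identical results
}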
@@ -1676,7 +1666,6 @@ trx_undo_update_rec_get_update(
}

/** Report a RENAME TABLE operation.
@param[in,out]	trx	transaction
@param[in]	table	table that is being renamed
@param[in,out]	block	undo page
@param[in,out]	mtr	mini-transaction
@@ -1684,7 +1673,7 @@ trx_undo_update_rec_get_update(
@retval	0	in case of failure */
static
uint16_t
trx_undo_page_report_rename(trx_t* trx, const dict_table_t* table,
trx_undo_page_report_rename(const dict_table_t* table,
			    buf_block_t* block, mtr_t* mtr)
{
	byte*	ptr_first_free = my_assume_aligned<2>(TRX_UNDO_PAGE_HDR
@@ -1710,7 +1699,7 @@ trx_undo_page_report_rename(trx_t* trx, const dict_table_t* table,

	byte* ptr = start + 2;
	*ptr++ = TRX_UNDO_RENAME_TABLE;
	ptr += mach_u64_write_much_compressed(ptr, trx->undo_no);
	ptr += mach_u64_write_much_compressed(ptr, mtr->trx->undo_no);
	ptr += mach_u64_write_much_compressed(ptr, table->id);
	memcpy(ptr, table->name.m_name, len);
	ptr += len;
@@ -1731,10 +1720,10 @@ dberr_t trx_undo_report_rename(trx_t* trx, const dict_table_t* table)
	ut_ad(trx->id);
	ut_ad(!table->is_temporary());

	mtr_t mtr;
	mtr_t mtr{trx};
	dberr_t err;
	mtr.start();
	if (buf_block_t* block = trx_undo_assign(trx, &err, &mtr)) {
	if (buf_block_t* block = trx_undo_assign(&mtr, &err)) {
		trx_undo_t*	undo = trx->rsegs.m_redo.undo;
		ut_ad(err == DB_SUCCESS);
		ut_ad(undo);
@@ -1744,7 +1733,7 @@ dberr_t trx_undo_report_rename(trx_t* trx, const dict_table_t* table)
		      == block->page.id().page_no());

		if (uint16_t offset = trx_undo_page_report_rename(
			    trx, table, block, &mtr)) {
			    table, block, &mtr)) {
			undo->top_page_no = undo->last_page_no;
			undo->top_offset = offset;
			undo->top_undo_no = trx->undo_no++;
@@ -1890,7 +1879,7 @@ trx_undo_report_row_operation(
		bulk = false;
	}

	mtr_t		mtr;
	mtr_t		mtr{trx};
	dberr_t		err;
	mtr.start();
	trx_undo_t**	pundo;
@@ -1902,15 +1891,15 @@ trx_undo_report_row_operation(
		mtr.set_log_mode(MTR_LOG_NO_REDO);
		rseg = trx->get_temp_rseg();
		pundo = &trx->rsegs.m_noredo.undo;
		undo_block = trx_undo_assign_low<true>(trx, rseg, pundo,
						       &mtr, &err);
		undo_block = trx_undo_assign_low<true>(&mtr, &err,
						       rseg, pundo);
	} else {
		ut_ad(!trx->read_only);
		ut_ad(trx->id);
		pundo = &trx->rsegs.m_redo.undo;
		rseg = trx->rsegs.m_redo.rseg;
		undo_block = trx_undo_assign_low<false>(trx, rseg, pundo,
							&mtr, &err);
		undo_block = trx_undo_assign_low<false>(&mtr, &err,
							rseg, pundo);
	}

	trx_undo_t*	undo = *pundo;
@@ -1926,10 +1915,10 @@ err_exit:
	do {
		uint16_t offset = !rec
			? trx_undo_page_report_insert(
				undo_block, trx, index, clust_entry, &mtr,
				undo_block, index, clust_entry, &mtr,
				bulk)
			: trx_undo_page_report_modify(
				undo_block, trx, index, rec, offsets, update,
				undo_block, index, rec, offsets, update,
				cmpl_info, clust_entry, &mtr);

		if (UNIV_UNLIKELY(offset == 0)) {
@@ -2069,7 +2058,7 @@ static dberr_t trx_undo_prev_version(const rec_t *rec, dict_index_t *index,
                                     const trx_undo_rec_t *undo_rec);

inline const buf_block_t *
purge_sys_t::view_guard::get(const page_id_t id, mtr_t *mtr)
purge_sys_t::view_guard::get(const page_id_t id, trx_t *trx, mtr_t *mtr)
{
  buf_block_t *block;
  ut_ad(mtr->is_active());
@@ -2083,7 +2072,7 @@ purge_sys_t::view_guard::get(const page_id_t id, mtr_t *mtr)
      return block;
    }
  }
  block= buf_pool.page_fix(id);
  block= buf_pool.page_fix(id, trx);
  if (block)
  {
    mtr->memo_push(block, MTR_MEMO_BUF_FIX);
@@ -2140,7 +2129,9 @@ dberr_t trx_undo_prev_version_build(const rec_t *rec, dict_index_t *index,

  ut_ad(!index->table->skip_alter_undo);

  mariadb_increment_undo_records_read();
  if (!mtr->trx);
  else if (ha_handler_stats *stats= mtr->trx->active_handler_stats)
    stats->undo_records_read++;
  const auto savepoint= mtr->get_savepoint();
  dberr_t err= DB_MISSING_HISTORY;
  purge_sys_t::view_guard check{v_status == TRX_UNDO_CHECK_PURGE_PAGES
@@ -2156,7 +2147,7 @@ dberr_t trx_undo_prev_version_build(const rec_t *rec, dict_index_t *index,
  if (const buf_block_t *undo_page=
      check.get(page_id_t{trx_sys.rseg_array[(roll_ptr >> 48) & 0x7f].
                          space->id,
                          uint32_t(roll_ptr >> 16)}, mtr))
                          uint32_t(roll_ptr >> 16)}, mtr->trx, mtr))
  {
    static_assert(ROLL_PTR_BYTE_POS == 0, "");
    const uint16_t offset{uint16_t(roll_ptr)};
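In trx_undo_prev_version_build() the global mariadb_increment_undo_records_read() hook is replaced by per-transaction accounting keyed on mtr->trx: when the mini-transaction has no owning transaction there is nothing to count, otherwise the counter goes into that transaction's active handler statistics. The "if (x); else if (...)" shape is the tree's existing do-nothing-first idiom. A self-contained sketch of the same shape (illustrative structures only, not the real ha_handler_stats or trx_t):

#include <cstdint>
#include <iostream>

struct handler_stats { uint64_t undo_records_read= 0; };
struct toy_trx { handler_stats *active_handler_stats= nullptr; };
struct toy_mtr { toy_trx *trx= nullptr; };

static void count_undo_record_read(const toy_mtr &mtr)
{
  // Same shape as the hunk above: no owning transaction -> nothing to do;
  // otherwise bump the per-statement handler statistics if they are enabled.
  if (!mtr.trx);
  else if (handler_stats *stats= mtr.trx->active_handler_stats)
    stats->undo_records_read++;
}

int main()
{
  handler_stats stats;
  toy_trx trx{&stats};
  toy_mtr with_trx{&trx}, background{};
  count_undo_record_read(with_trx);
  count_undo_record_read(background);             // silently ignored
  std::cout << stats.undo_records_read << '\n';   // prints 1
}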
@@ -86,8 +86,7 @@ bool trx_t::rollback_finish() noexcept
    ut_free(undo);
    undo= nullptr;
  }
  commit_low();
  return commit_cleanup();
  return commit();
}

dberr_t trx_t::rollback_low(const undo_no_t *savept) noexcept
@@ -238,10 +237,10 @@ dberr_t trx_rollback_for_mysql(trx_t* trx)
			the actions already having been rolled back. */
			ut_ad(trx->rsegs.m_redo.undo->rseg
			      == trx->rsegs.m_redo.rseg);
			mtr_t		mtr;
			mtr_t		mtr{trx};
			mtr.start();
			if (trx_undo_t* undo = trx->rsegs.m_redo.undo) {
				trx_undo_set_state_at_prepare(trx, undo, true,
				trx_undo_set_state_at_prepare(undo, true,
							      &mtr);
			}
			/* Write the redo log for the XA ROLLBACK

@@ -169,7 +169,7 @@ segments will be reset.
@param[in]	xid	WSREP XID */
void trx_rseg_update_wsrep_checkpoint(const XID* xid)
{
	mtr_t	mtr;
	mtr_t	mtr{nullptr};
	mtr.start();
	trx_rseg_update_wsrep_checkpoint(xid, &mtr);
	mtr.commit();
@@ -246,7 +246,7 @@ static bool trx_rseg_init_wsrep_xid(const page_t* page, XID& xid)
@return whether the WSREP XID was found */
bool trx_rseg_read_wsrep_checkpoint(XID& xid)
{
	mtr_t		mtr;
	mtr_t		mtr{nullptr};
	long long	max_xid_seqno = -1;
	bool		found = false;

@@ -606,7 +606,7 @@ dberr_t trx_rseg_array_init()
	wsrep_sys_xid.null();
	bool wsrep_xid_in_rseg_found = false;
#endif
	mtr_t mtr;
	mtr_t mtr{nullptr};
	dberr_t err = DB_SUCCESS;
	/* mariabackup --prepare only deals with the redo log and the data
	files, not with transactions or the data dictionary, that's why

@@ -111,7 +111,7 @@ static
void
trx_sysf_get_n_rseg_slots()
{
	mtr_t		mtr;
	mtr_t		mtr{nullptr};
	mtr.start();

	srv_available_undo_logs = 0;
@@ -251,7 +251,7 @@ bool trx_sys_t::find_same_or_older_low(trx_t *trx, trx_id_t id) noexcept
static trx_rseg_t *trx_rseg_create(uint32_t space_id)
{
  trx_rseg_t *rseg= nullptr;
  mtr_t mtr;
  mtr_t mtr{nullptr};

  mtr.start();


@@ -375,6 +375,13 @@ void trx_t::free()
#endif
  MEM_CHECK_DEFINED(this, sizeof *this);
  autoinc_locks.make_undefined();
  ut_ad(!active_handler_stats);

  if (size_t n_page_gets= pages_accessed)
  {
    pages_accessed= 0;
    buf_pool.stat.n_page_gets+= n_page_gets;
  }

  ut_ad(!n_mysql_tables_in_use);
  ut_ad(!mysql_log_file_name);
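The hunk in trx_t::free() shows the other side of the per-transaction attribution: trx_t now accumulates a pages_accessed counter locally and folds it into the shared buf_pool.stat.n_page_gets only when the object is freed (and, in the commit_cleanup() hunk below, when a transaction is recycled). This batching is also what changes the pages_accessed values reported in the ANALYZE FORMAT=JSON results earlier in this commit. A minimal sketch of the batching idea, under the assumption that the shared statistic can be modelled as a single atomic counter (the real buf_pool.stat differs):

#include <atomic>
#include <cstddef>
#include <iostream>

static std::atomic<size_t> global_page_gets{0};

struct toy_trx
{
  size_t pages_accessed= 0;               // updated without atomics, per trx

  void flush_page_stats()
  {
    // Same shape as the hunk above: drain the local counter once,
    // then publish it to the shared statistic in a single step.
    if (size_t n= pages_accessed)
    {
      pages_accessed= 0;
      global_page_gets.fetch_add(n, std::memory_order_relaxed);
    }
  }
};

int main()
{
  toy_trx trx;
  trx.pages_accessed= 84;                  // accumulated while the trx ran
  trx.flush_page_stats();                  // one shared update at the end
  std::cout << global_page_gets.load() << '\n';   // prints 84
}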
@@ -569,7 +576,7 @@ static dberr_t trx_resurrect_table_locks(trx_t *trx, const trx_undo_t &undo)
  if (undo.empty())
    return DB_SUCCESS;

  mtr_t mtr;
  mtr_t mtr{trx};
  std::map<table_id_t, bool> tables;
  mtr.start();

@@ -1179,6 +1186,7 @@ inline void trx_t::write_serialisation_history(mtr_t *mtr)
  else
    rseg->release();
  mtr->commit();
  commit_lsn= undo_no || !xid.is_null() ? mtr->commit_lsn() : 0;
}

/********************************************************************
@@ -1334,8 +1342,10 @@ void trx_t::evict_table(table_id_t table_id, bool reset_only)
}

/** Free temporary undo log after commit or rollback.
@param mtr   mini-transaction
@param undo  temporary undo log */
ATTRIBUTE_NOINLINE static void trx_commit_cleanup(trx_undo_t *&undo)
ATTRIBUTE_NOINLINE static void trx_commit_cleanup(mtr_t *mtr,
                                                  trx_undo_t *&undo)
{
  trx_rseg_t *const rseg= undo->rseg;
  ut_ad(rseg->space == fil_system.temp_space);
@@ -1345,23 +1355,22 @@ ATTRIBUTE_NOINLINE static void trx_commit_cleanup(trx_undo_t *&undo)
  ut_ad(undo->id < TRX_RSEG_N_SLOTS);
  /* Delete first the undo log segment in the file */
  bool finished;
  mtr_t mtr;
  do
  {
    mtr.start();
    mtr.set_log_mode(MTR_LOG_NO_REDO);
    mtr->start();
    mtr->set_log_mode(MTR_LOG_NO_REDO);

    finished= true;

    if (buf_block_t *block=
        buf_page_get(page_id_t(SRV_TMP_SPACE_ID, undo->hdr_page_no), 0,
                     RW_X_LATCH, &mtr))
                     RW_X_LATCH, mtr))
    {
      finished= fseg_free_step(block, TRX_UNDO_SEG_HDR + TRX_UNDO_FSEG_HEADER,
                               &mtr);
                               mtr);

      if (!finished);
      else if (buf_block_t *rseg_header= rseg->get(&mtr, nullptr))
      else if (buf_block_t *rseg_header= rseg->get(mtr, nullptr))
      {
        static_assert(FIL_NULL == 0xffffffff, "compatibility");
        memset(rseg_header->page.frame + TRX_RSEG + TRX_RSEG_UNDO_SLOTS +
@@ -1369,7 +1378,7 @@ ATTRIBUTE_NOINLINE static void trx_commit_cleanup(trx_undo_t *&undo)
      }
    }

    mtr.commit();
    mtr->commit();
  }
  while (!finished);

@@ -1380,7 +1389,7 @@ ATTRIBUTE_NOINLINE static void trx_commit_cleanup(trx_undo_t *&undo)
  undo= nullptr;
}

TRANSACTIONAL_INLINE inline void trx_t::commit_in_memory(const mtr_t *mtr)
TRANSACTIONAL_INLINE inline void trx_t::commit_in_memory(mtr_t *mtr)
{
  /* We already detached from rseg in write_serialisation_history() */
  ut_ad(!rsegs.m_redo.undo);
@@ -1450,20 +1459,8 @@ TRANSACTIONAL_INLINE inline void trx_t::commit_in_memory(const mtr_t *mtr)
    release_locks();
  }

  if (trx_undo_t *&undo= rsegs.m_noredo.undo)
  if (commit_lsn)
  {
    ut_ad(undo->rseg == rsegs.m_noredo.rseg);
    trx_commit_cleanup(undo);
  }

  if (mtr)
  {
    /* NOTE that we could possibly make a group commit more efficient
    here: call std::this_thread::yield() here to allow also other trxs to come
    to commit! */

    /*-------------------------------------*/

    /* Depending on the my.cnf options, we may now write the log
    buffer to the log files, making the transaction durable if the OS
    does not crash. We may also flush the log files to disk, making
@@ -1485,14 +1482,19 @@ TRANSACTIONAL_INLINE inline void trx_t::commit_in_memory(const mtr_t *mtr)
    serialize all commits and prevent a group of transactions from
    gathering. */

    commit_lsn= undo_no || !xid.is_null() ? mtr->commit_lsn() : 0;
    if (commit_lsn && !flush_log_later && srv_flush_log_at_trx_commit)
    if (!flush_log_later && srv_flush_log_at_trx_commit)
    {
      trx_flush_log_if_needed(commit_lsn, this);
      commit_lsn= 0;
    }
  }

  if (trx_undo_t *&undo= rsegs.m_noredo.undo)
  {
    ut_ad(undo->rseg == rsegs.m_noredo.rseg);
    trx_commit_cleanup(mtr, undo);
  }

  if (fts_trx)
    trx_finalize_for_fts(this, undo_no != 0);

@@ -1520,6 +1522,11 @@ bool trx_t::commit_cleanup() noexcept
  for (auto &t : mod_tables)
    delete t.second.bulk_store;

  if (size_t n_page_gets= pages_accessed)
  {
    pages_accessed= 0;
    buf_pool.stat.n_page_gets+= n_page_gets;
  }
  mutex.wr_lock();
  state= TRX_STATE_NOT_STARTED;
  *detailed_error= '\0';
@@ -1536,14 +1543,11 @@ bool trx_t::commit_cleanup() noexcept
  return false;
}

/** Commit the transaction in a mini-transaction.
@param mtr  mini-transaction (if there are any persistent modifications) */
TRANSACTIONAL_TARGET void trx_t::commit_low(mtr_t *mtr)
/** Commit the transaction in the file system. */
TRANSACTIONAL_TARGET void trx_t::commit_persist() noexcept
{
  ut_ad(!mtr || mtr->is_active());
  ut_d(bool aborted= in_rollback && error_state == DB_DEADLOCK);
  ut_ad(!mtr == (aborted || !has_logged_persistent()));
  ut_ad(!mtr || !aborted);
  mtr_t mtr{this};
  mtr.start();

  if (fts_trx && undo_no)
  {
@@ -1563,8 +1567,9 @@ TRANSACTIONAL_TARGET void trx_t::commit_low(mtr_t *mtr)
#ifdef ENABLED_DEBUG_SYNC
  const bool debug_sync= mysql_thd && has_logged_persistent();
#endif
  commit_lsn =0;

  if (mtr)
  if (has_logged_persistent())
  {
    if (UNIV_UNLIKELY(apply_online_log))
      apply_log();
@@ -1580,7 +1585,7 @@ TRANSACTIONAL_TARGET void trx_t::commit_low(mtr_t *mtr)
    different rollback segments. However, if a transaction T2 is
    able to see modifications made by a transaction T1, T2 will always
    get a bigger transaction number and a bigger commit lsn than T1. */
    write_serialisation_history(mtr);
    write_serialisation_history(&mtr);
  }
  else if (trx_rseg_t *rseg= rsegs.m_redo.rseg)
  {
@@ -1594,25 +1599,11 @@ TRANSACTIONAL_TARGET void trx_t::commit_low(mtr_t *mtr)
  DEBUG_SYNC_C("before_trx_state_committed_in_memory");
#endif

  commit_in_memory(mtr);
  commit_in_memory(&mtr);
}


void trx_t::commit_persist() noexcept
{
  mtr_t *mtr= nullptr;
  mtr_t local_mtr;

  if (has_logged_persistent())
  {
    mtr= &local_mtr;
    local_mtr.start();
  }
  commit_low(mtr);
}


void trx_t::commit() noexcept
bool trx_t::commit() noexcept
{
  ut_ad(!was_dict_operation);
  ut_d(was_dict_operation= dict_operation);
@@ -1623,7 +1614,7 @@ void trx_t::commit() noexcept
  for (const auto &p : mod_tables) ut_ad(!p.second.is_dropped());
#endif /* UNIV_DEBUG */
  ut_d(was_dict_operation= false);
  commit_cleanup();
  return commit_cleanup();
}
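These hunks collapse commit_low() into commit_persist(): instead of an optional mtr_t* that only existed when there was persistent undo, a mini-transaction owned by the committing transaction (mtr_t mtr{this}) is always constructed and started, and the branching moves to has_logged_persistent(); commit() now also returns the result of commit_cleanup(). A loose standalone analogy of that rewrite, not the real control flow (the real commit path does much more):

#include <iostream>

struct toy_mtr
{
  void *owner;
  explicit toy_mtr(void *o) : owner(o) {}
  void start() {}
};

struct toy_trx
{
  bool logged_persistent;

  void write_history(toy_mtr *) { std::cout << "history "; }
  void commit_in_memory(toy_mtr *) { std::cout << "memory\n"; }

  // Old shape: an optional mtr pointer decided the control flow.
  void commit_persist_old()
  {
    toy_mtr *mtr= nullptr;
    toy_mtr local{this};
    if (logged_persistent) { mtr= &local; mtr->start(); }
    if (mtr) write_history(mtr);
    commit_in_memory(mtr);
  }

  // New shape: the mtr always exists, names its owner, and is started;
  // the branch is keyed on logged_persistent instead of a null pointer.
  void commit_persist_new()
  {
    toy_mtr mtr{this};
    mtr.start();
    if (logged_persistent) write_history(&mtr);
    commit_in_memory(&mtr);
  }
};

int main()
{
  toy_trx t{true};
  t.commit_persist_old();   // prints: history memory
  t.commit_persist_new();   // prints: history memory
}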
@@ -1906,14 +1897,14 @@ static lsn_t trx_prepare_low(trx_t *trx)
{
	ut_ad(!trx->is_recovered);

	mtr_t	mtr;
	mtr_t	mtr{trx};

	if (trx_undo_t* undo = trx->rsegs.m_noredo.undo) {
		ut_ad(undo->rseg == trx->rsegs.m_noredo.rseg);

		mtr.start();
		mtr.set_log_mode(MTR_LOG_NO_REDO);
		trx_undo_set_state_at_prepare(trx, undo, false, &mtr);
		trx_undo_set_state_at_prepare(undo, false, &mtr);
		mtr.commit();
	}

@@ -1932,7 +1923,7 @@ static lsn_t trx_prepare_low(trx_t *trx)
		TRX_UNDO_PREPARED: these modifications to the file data
		structure define the transaction as prepared in the file-based
		world, at the serialization point of lsn. */
		trx_undo_set_state_at_prepare(trx, undo, false, &mtr);
		trx_undo_set_state_at_prepare(undo, false, &mtr);

		/* Make the XA PREPARE durable. */
		mtr.commit();

@@ -353,11 +353,12 @@ ATTRIBUTE_COLD void trx_t::apply_log()
    return;
  const page_id_t page_id{rsegs.m_redo.rseg->space->id, undo->hdr_page_no};
  page_id_t next_page_id(page_id);
  buf_block_t *block= buf_pool.page_fix(page_id, nullptr, buf_pool_t::FIX_WAIT_READ);
  buf_block_t *block=
    buf_pool.page_fix(page_id, nullptr, this, buf_pool_t::FIX_WAIT_READ);
  if (UNIV_UNLIKELY(!block))
    return;

  UndorecApplier log_applier(page_id, id);
  UndorecApplier log_applier(page_id, *this);

  for (;;)
  {
@@ -380,7 +381,8 @@ ATTRIBUTE_COLD void trx_t::apply_log()
    if (next == FIL_NULL)
      break;
    next_page_id.set_page_no(next);
    block= buf_pool.page_fix(next_page_id, nullptr, buf_pool_t::FIX_WAIT_READ);
    block= buf_pool.page_fix(next_page_id, nullptr, this,
                             buf_pool_t::FIX_WAIT_READ);
    if (UNIV_UNLIKELY(!block))
      break;
    log_applier.assign_next(next_page_id);
@@ -802,21 +804,17 @@ dberr_t trx_undo_free_last_page(trx_undo_t *undo, mtr_t *mtr)

/** Truncate the tail of an undo log during rollback.
@param[in,out]	undo	undo log
@param[in]	limit	all undo logs after this limit will be discarded
@param[in]	is_temp	whether this is temporary undo log
@param[in,out]	mtr	mini-transaction
@return error code */
static dberr_t trx_undo_truncate_end(trx_undo_t &undo, undo_no_t limit,
                                     bool is_temp)
static dberr_t trx_undo_truncate_end(trx_undo_t &undo, mtr_t &mtr)
{
  ut_ad(is_temp == !undo.rseg->is_persistent());

  if (UNIV_UNLIKELY(undo.last_page_no == FIL_NULL))
    return DB_CORRUPTION;

  for (mtr_t mtr;;)
  for (const auto limit= mtr.trx->undo_no;;)
  {
    mtr.start();
    if (is_temp)
    if (!undo.rseg->is_persistent())
      mtr.set_log_mode(MTR_LOG_NO_REDO);

    trx_undo_rec_t *trunc_here= nullptr;
@@ -867,19 +865,21 @@ func_exit:
/** Try to truncate the undo logs.
@param trx transaction
@return error code */
dberr_t trx_undo_try_truncate(const trx_t &trx)
dberr_t trx_undo_try_truncate(trx_t *trx)
{
  if (trx_undo_t *undo= trx.rsegs.m_redo.undo)
  mtr_t mtr{trx};

  if (trx_undo_t *undo= trx->rsegs.m_redo.undo)
  {
    ut_ad(undo->rseg == trx.rsegs.m_redo.rseg);
    if (dberr_t err= trx_undo_truncate_end(*undo, trx.undo_no, false))
    ut_ad(undo->rseg == trx->rsegs.m_redo.rseg);
    if (dberr_t err= trx_undo_truncate_end(*undo, mtr))
      return err;
  }

  if (trx_undo_t *undo = trx.rsegs.m_noredo.undo)
  if (trx_undo_t *undo = trx->rsegs.m_noredo.undo)
  {
    ut_ad(undo->rseg == trx.rsegs.m_noredo.rseg);
    if (dberr_t err= trx_undo_truncate_end(*undo, trx.undo_no, true))
    ut_ad(undo->rseg == trx->rsegs.m_noredo.rseg);
    if (dberr_t err= trx_undo_truncate_end(*undo, mtr))
      return err;
  }
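In trx_undo_truncate_end() the explicit limit and is_temp parameters disappear because both can be recovered from what is already passed in: the limit from mtr.trx->undo_no and the temporariness from the undo log's own rollback segment; trx_undo_try_truncate() constructs one mtr_t{trx} and reuses it for both undo logs. A toy sketch of that parameter-dropping refactoring (illustrative types only):

#include <cstdint>
#include <iostream>

struct toy_trx  { uint64_t undo_no; };
struct toy_mtr  { toy_trx *trx; };
struct toy_undo { bool persistent; uint64_t top_undo_no; };

// Old shape: the caller had to pass both the limit and the is_temp flag.
static void truncate_end_old(toy_undo &undo, uint64_t limit, bool is_temp)
{ if (!is_temp && undo.top_undo_no >= limit) undo.top_undo_no= limit; }

// New shape: both facts are derived from arguments that are already there.
static void truncate_end_new(toy_undo &undo, toy_mtr &mtr)
{
  const uint64_t limit= mtr.trx->undo_no;      // was a separate parameter
  const bool is_temp= !undo.persistent;        // was a separate parameter
  if (!is_temp && undo.top_undo_no >= limit) undo.top_undo_no= limit;
}

int main()
{
  toy_trx trx{5};
  toy_mtr mtr{&trx};
  toy_undo a{true, 9}, b{true, 9};
  truncate_end_old(a, trx.undo_no, !a.persistent);
  truncate_end_new(b, mtr);
  std::cout << a.top_undo_no << ' ' << b.top_undo_no << '\n';   // prints: 5 5
}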
@@ -904,7 +904,7 @@ trx_undo_truncate_start(
{
	trx_undo_rec_t* rec;
	trx_undo_rec_t* last_rec;
	mtr_t		mtr;
	mtr_t		mtr{nullptr};

	ut_ad(rseg->is_persistent());

@@ -965,7 +965,7 @@ done:
trx_undo_t *
trx_undo_mem_create_at_db_start(trx_rseg_t *rseg, ulint id, uint32_t page_no)
{
  mtr_t mtr;
  mtr_t mtr{nullptr};
  XID xid;

  ut_ad(id < TRX_RSEG_N_SLOTS);
@@ -1190,17 +1190,16 @@ trx_undo_mem_init_for_reuse(
}

/** Create an undo log.
@param[in,out]	trx	transaction
@param[in,out]	mtr	mini-transaction
@param[out]	err	error code
@param[in,out]	rseg	rollback segment
@param[out]	undo	undo log object
@param[out]	err	error code
@param[in,out]	mtr	mini-transaction
@return undo log block
@retval	NULL	on failure */
static MY_ATTRIBUTE((nonnull, warn_unused_result))
buf_block_t*
trx_undo_create(trx_t* trx, trx_rseg_t* rseg, trx_undo_t** undo,
		dberr_t* err, mtr_t* mtr)
trx_undo_create(mtr_t *mtr, dberr_t *err, trx_rseg_t* rseg, trx_undo_t** undo)
	noexcept
{
	ulint		id;
	buf_block_t*	block = rseg->get(mtr, err);
@@ -1215,6 +1214,7 @@ trx_undo_create(trx_t* trx, trx_rseg_t* rseg, trx_undo_t** undo,

	rseg->curr_size++;

	trx_t *const trx{mtr->trx};
	uint16_t offset = trx_undo_header_create(block, trx->id, mtr);

	*undo = trx_undo_mem_create(rseg, id, trx->id, &trx->xid,
@@ -1244,18 +1244,17 @@ trx_undo_create(trx_t* trx, trx_rseg_t* rseg, trx_undo_t** undo,
/*================ UNDO LOG ASSIGNMENT AND CLEANUP =====================*/

/** Reuse a cached undo log block.
@param[in,out]	trx	transaction
@param[in,out]	rseg	rollback segment
@param[out]	pundo	the undo log memory object
@param[in,out]	mtr	mini-transaction
@param[out]	err	error code
@param[in,out]	rseg	rollback segment
@param[out]	pundo	the undo log memory object
@return	the undo log block
@retval	NULL	if none cached */
static
buf_block_t*
trx_undo_reuse_cached(trx_t* trx, trx_rseg_t* rseg, trx_undo_t** pundo,
		      mtr_t* mtr, dberr_t *err)
static buf_block_t *trx_undo_reuse_cached(mtr_t *mtr, dberr_t *err,
                                          trx_rseg_t *rseg, trx_undo_t **pundo)
  noexcept
{
	trx_t *const trx{mtr->trx};
	ut_ad(rseg->is_persistent());
	ut_ad(rseg->is_referenced());
	ut_ad(rseg == trx->rsegs.m_redo.rseg);
@@ -1312,16 +1311,15 @@ trx_undo_reuse_cached(trx_t* trx, trx_rseg_t* rseg, trx_undo_t** pundo,

/** Assign an undo log for a persistent transaction.
A new undo log is created or a cached undo log reused.
@param[in,out]	trx	transaction
@param[out]	err	error code
@param[in,out]	mtr	mini-transaction
@param[out]	err	error code
@return	the undo log block
@retval	NULL	on error */
buf_block_t*
trx_undo_assign(trx_t* trx, dberr_t* err, mtr_t* mtr)
buf_block_t *trx_undo_assign(mtr_t *mtr, dberr_t *err) noexcept
{
	ut_ad(mtr->get_log_mode() == MTR_LOG_ALL);

	trx_t *const trx{mtr->trx};
	trx_undo_t*	undo = trx->rsegs.m_redo.undo;
	buf_block_t*	block;

@@ -1340,12 +1338,11 @@ trx_undo_assign(trx_t* trx, dberr_t* err, mtr_t* mtr)
	trx_rseg_t*	rseg = trx->rsegs.m_redo.rseg;

	rseg->latch.wr_lock(SRW_LOCK_CALL);
	block = trx_undo_reuse_cached(
		trx, rseg, &trx->rsegs.m_redo.undo, mtr, err);
	block = trx_undo_reuse_cached(mtr, err, rseg, &trx->rsegs.m_redo.undo);

	if (!block) {
		block = trx_undo_create(trx, rseg, &trx->rsegs.m_redo.undo,
					err, mtr);
		block = trx_undo_create(mtr, err, rseg,
					&trx->rsegs.m_redo.undo);
		ut_ad(!block == (*err != DB_SUCCESS));
		if (!block) {
			goto func_exit;
@@ -1362,23 +1359,22 @@ func_exit:
/** Assign an undo log for a transaction.
A new undo log is created or a cached undo log reused.
@tparam	is_temp	whether this is temporary undo log
@param[in,out]	trx	transaction
@param[in]	rseg	rollback segment
@param[out]	undo	the undo log
@param[in,out]	mtr	mini-transaction
@param[out]	err	error code
@param[in]	rseg	rollback segment
@param[out]	undo	the undo log
@return	the undo log block
@retval	nullptr	on error */
template<bool is_temp>
buf_block_t*
trx_undo_assign_low(trx_t *trx, trx_rseg_t *rseg, trx_undo_t **undo,
		    mtr_t *mtr, dberr_t *err)
trx_undo_assign_low(mtr_t *mtr, dberr_t *err,
		    trx_rseg_t *rseg, trx_undo_t **undo)
{
	ut_ad(is_temp == (rseg == trx->rsegs.m_noredo.rseg));
	ut_ad(is_temp || rseg == trx->rsegs.m_redo.rseg);
	ut_ad(is_temp == (rseg == mtr->trx->rsegs.m_noredo.rseg));
	ut_ad(is_temp || rseg == mtr->trx->rsegs.m_redo.rseg);
	ut_ad(undo == (is_temp
		       ? &trx->rsegs.m_noredo.undo
		       : &trx->rsegs.m_redo.undo));
		       ? &mtr->trx->rsegs.m_noredo.undo
		       : &mtr->trx->rsegs.m_redo.undo));
	ut_ad(mtr->get_log_mode()
	      == (is_temp ? MTR_LOG_NO_REDO : MTR_LOG_ALL));
	buf_block_t*	block;
@@ -1404,40 +1400,36 @@ trx_undo_assign_low(trx_t *trx, trx_rseg_t *rseg, trx_undo_t **undo,
	if (is_temp) {
		ut_ad(!UT_LIST_GET_LEN(rseg->undo_cached));
	} else {
		block = trx_undo_reuse_cached(trx, rseg, undo, mtr, err);
		block = trx_undo_reuse_cached(mtr, err, rseg, undo);
		if (block) {
			goto got_block;
		}
	}
	block = trx_undo_create(trx, rseg, undo, err, mtr);
	block = trx_undo_create(mtr, err, rseg, undo);
	ut_ad(!block == (*err != DB_SUCCESS));
	if (!block) {
		goto func_exit;
	if (block) {
got_block:
		UT_LIST_ADD_FIRST(rseg->undo_list, *undo);
	}

got_block:
	UT_LIST_ADD_FIRST(rseg->undo_list, *undo);

func_exit:
	rseg->latch.wr_unlock();
	return block;
}
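The tail of trx_undo_assign_low() is also reshaped: the got_block label now sits inside "if (block)", so the failure path simply falls through to the latch release instead of needing its own goto func_exit. A minimal, valid illustration of jumping to a label inside an if-body (no initialization is bypassed); this is only a control-flow sketch, not the real undo-assignment code:

#include <iostream>

static bool assign(bool have_cached, bool can_create)
{
  bool block= false;
  bool registered= false;

  if (have_cached)
  {
    block= true;
    goto got_block;                 // jump into the if-body below is legal:
  }                                 // no declarations are skipped
  block= can_create;
  if (block)
  {
got_block:
    registered= true;               // UT_LIST_ADD_FIRST() in the real code
  }

  // func_exit: both success and failure end up here (latch release).
  return block && registered;
}

int main()
{
  std::cout << assign(true, false) << assign(false, true)
            << assign(false, false) << '\n';    // prints 110
}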
template buf_block_t*
trx_undo_assign_low<false>(trx_t *trx, trx_rseg_t *rseg, trx_undo_t **undo,
			   mtr_t *mtr, dberr_t *err);
trx_undo_assign_low<false>(mtr_t *mtr, dberr_t *err,
			   trx_rseg_t *rseg, trx_undo_t **undo);
template buf_block_t*
trx_undo_assign_low<true>(trx_t *trx, trx_rseg_t *rseg, trx_undo_t **undo,
			  mtr_t *mtr, dberr_t *err);
trx_undo_assign_low<true>(mtr_t *mtr, dberr_t *err,
			  trx_rseg_t *rseg, trx_undo_t **undo);

/** Set the state of the undo log segment at a XA PREPARE or XA ROLLBACK.
@param[in,out]	trx	transaction
@param[in,out]	undo	undo log
@param[in]	rollback	false=XA PREPARE, true=XA ROLLBACK
@param[in,out]	mtr	mini-transaction
@return undo log segment header page, x-latched */
void trx_undo_set_state_at_prepare(trx_t *trx, trx_undo_t *undo, bool rollback,
                                   mtr_t *mtr)
void trx_undo_set_state_at_prepare(trx_undo_t *undo, bool rollback, mtr_t *mtr)
  noexcept
{
	ut_a(undo->id < TRX_RSEG_N_SLOTS);

@@ -1462,7 +1454,7 @@ void trx_undo_set_state_at_prepare(trx_t *trx, trx_undo_t *undo, bool rollback,
	/*------------------------------*/
	ut_ad(undo->state == TRX_UNDO_ACTIVE);
	undo->state = TRX_UNDO_PREPARED;
	undo->xid = trx->xid;
	undo->xid = mtr->trx->xid;
	/*------------------------------*/

	mtr->write<2>(*block, TRX_UNDO_SEG_HDR + TRX_UNDO_STATE

@@ -31,12 +31,13 @@ buf_block_t *buf_page_get_gen(const page_id_t, ulint, rw_lock_type_t,
{ return nullptr; }
bool buf_page_make_young_if_needed(buf_page_t*) { return false; }

mtr_t::mtr_t()= default;
mtr_t::mtr_t(trx_t *trx) : trx(trx) {}
mtr_t::~mtr_t()= default;
void mtr_t::start() {}
void mtr_t::commit() {}
void mtr_t::rollback_to_savepoint(ulint, ulint) {}
void small_vector_base::grow_by_1(void *, size_t) noexcept {}
void buf_inc_get(trx_t*) noexcept {}

void sql_print_error(const char*, ...) {}
ulint ut_find_prime(ulint n) { return n; }
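The final hunk updates a unit-test stub translation unit: it now defines the mtr_t(trx_t*) constructor and a no-op buf_inc_get(trx_t*) so the undo code can still be linked and exercised in isolation. A hypothetical standalone illustration of that stub pattern, i.e. giving the unit under test trivial definitions of the few externals it links against (names here are only loosely modelled on the stub file):

struct trx_t;                                   // opaque in the stub

struct mtr_t
{
  trx_t *trx;
  explicit mtr_t(trx_t *trx) : trx(trx) {}      // mirrors the added constructor
  void start() {}                               // deliberately does nothing
  void commit() {}
};

void buf_inc_get(trx_t *) noexcept {}           // statistics hook: no-op here

int main()
{
  mtr_t mtr{nullptr};                           // background-style usage
  mtr.start();
  buf_inc_get(mtr.trx);
  mtr.commit();
}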