diff --git a/extra/mariabackup/xtrabackup.cc b/extra/mariabackup/xtrabackup.cc index 18997b745e7..4bba3ba4e40 100644 --- a/extra/mariabackup/xtrabackup.cc +++ b/extra/mariabackup/xtrabackup.cc @@ -2445,7 +2445,7 @@ lsn_t xtrabackup_copy_log(copy_logfile copy, lsn_t start_lsn, lsn_t end_lsn) { lsn_t scanned_lsn = start_lsn; - const byte* log_block = log_sys->buf; + const byte* log_block = log_sys.buf; bool more_data = false; for (ulint scanned_checkpoint = 0; @@ -2494,7 +2494,7 @@ xtrabackup_copy_log(copy_logfile copy, lsn_t start_lsn, lsn_t end_lsn) recv_sys_justify_left_parsing_buf(); - log_sys->log.scanned_lsn = scanned_lsn; + log_sys.log.scanned_lsn = scanned_lsn; end_lsn = copy == COPY_LAST ? ut_uint64_align_up(scanned_lsn, OS_FILE_LOG_BLOCK_SIZE) @@ -2502,10 +2502,10 @@ xtrabackup_copy_log(copy_logfile copy, lsn_t start_lsn, lsn_t end_lsn) if (ulint write_size = ulint(end_lsn - start_lsn)) { if (srv_encrypt_log) { - log_crypt(log_sys->buf, start_lsn, write_size); + log_crypt(log_sys.buf, start_lsn, write_size); } - if (ds_write(dst_log_file, log_sys->buf, write_size)) { + if (ds_write(dst_log_file, log_sys.buf, write_size)) { msg("mariabackup: Error: " "write to logfile failed\n"); return(0); @@ -2544,7 +2544,7 @@ xtrabackup_copy_logfile(copy_logfile copy) lsn_t lsn= start_lsn; for(int retries= 0; retries < 100; retries++) { - if (log_group_read_log_seg(log_sys->buf, &log_sys->log, + if (log_group_read_log_seg(log_sys.buf, &log_sys.log, &lsn, end_lsn)){ break; } @@ -2565,7 +2565,7 @@ xtrabackup_copy_logfile(copy_logfile copy) } } while (start_lsn == end_lsn); - ut_ad(start_lsn == log_sys->log.scanned_lsn); + ut_ad(start_lsn == log_sys.log.scanned_lsn); msg_ts(">> log scanned up to (" LSN_PF ")\n", start_lsn); @@ -3656,9 +3656,9 @@ xtrabackup_backup_low() log_mutex_enter(); if (recv_find_max_checkpoint(&max_cp_field) == DB_SUCCESS - && log_sys->log.format != 0) { + && log_sys.log.format != 0) { metadata_to_lsn = mach_read_from_8( - log_sys->checkpoint_buf + LOG_CHECKPOINT_LSN); + log_sys.checkpoint_buf + LOG_CHECKPOINT_LSN); msg("mariabackup: The latest check point" " (for incremental): '" LSN_PF "'\n", metadata_to_lsn); @@ -3818,7 +3818,7 @@ fail: os_aio_init(srv_n_read_io_threads, srv_n_write_io_threads, SRV_MAX_N_PENDING_SYNC_IOS); - log_sys_init(); + log_sys.create(); log_init(srv_n_log_files); fil_space_t* space = fil_space_create( "innodb_redo_log", SRV_LOG_SPACE_FIRST_ID, 0, @@ -3894,7 +3894,7 @@ log_fail: goto fail; } - if (log_sys->log.format == 0) { + if (log_sys.log.format == 0) { old_format: msg("mariabackup: Error: cannot process redo log" " before MariaDB 10.2.2\n"); @@ -3902,14 +3902,14 @@ old_format: goto log_fail; } - ut_ad(!((log_sys->log.format ^ LOG_HEADER_FORMAT_CURRENT) + ut_ad(!((log_sys.log.format ^ LOG_HEADER_FORMAT_CURRENT) & ~LOG_HEADER_FORMAT_ENCRYPTED)); - const byte* buf = log_sys->checkpoint_buf; + const byte* buf = log_sys.checkpoint_buf; reread_log_header: - checkpoint_lsn_start = log_sys->log.lsn; - checkpoint_no_start = log_sys->next_checkpoint_no; + checkpoint_lsn_start = log_sys.log.lsn; + checkpoint_no_start = log_sys.next_checkpoint_no; err = recv_find_max_checkpoint(&max_cp_field); @@ -3917,14 +3917,14 @@ reread_log_header: goto log_fail; } - if (log_sys->log.format == 0) { + if (log_sys.log.format == 0) { goto old_format; } - ut_ad(!((log_sys->log.format ^ LOG_HEADER_FORMAT_CURRENT) + ut_ad(!((log_sys.log.format ^ LOG_HEADER_FORMAT_CURRENT) & ~LOG_HEADER_FORMAT_ENCRYPTED)); - log_group_header_read(&log_sys->log, max_cp_field); + 
log_group_header_read(&log_sys.log, max_cp_field); if (checkpoint_no_start != mach_read_from_8(buf + LOG_CHECKPOINT_NO)) { goto reread_log_header; @@ -3950,7 +3950,7 @@ reread_log_header: /* label it */ byte MY_ALIGNED(OS_FILE_LOG_BLOCK_SIZE) log_hdr[OS_FILE_LOG_BLOCK_SIZE]; memset(log_hdr, 0, sizeof log_hdr); - mach_write_to_4(LOG_HEADER_FORMAT + log_hdr, log_sys->log.format); + mach_write_to_4(LOG_HEADER_FORMAT + log_hdr, log_sys.log.format); mach_write_to_8(LOG_HEADER_START_LSN + log_hdr, checkpoint_lsn_start); strcpy(reinterpret_cast(LOG_HEADER_CREATOR + log_hdr), "Backup " MYSQL_SERVER_VERSION); @@ -4936,7 +4936,7 @@ xtrabackup_prepare_func(char** argv) ut_d(sync_check_enable()); ut_crc32_init(); recv_sys_init(); - log_sys_init(); + log_sys.create(); recv_recovery_on = true; #ifdef WITH_INNODB_DISALLOW_WRITES @@ -4970,7 +4970,7 @@ xtrabackup_prepare_func(char** argv) os_event_destroy(srv_allow_writes_event); #endif innodb_free_param(); - log_shutdown(); + log_sys.close(); sync_check_close(); if (!ok) goto error_cleanup; } diff --git a/mysql-test/suite/encryption/t/innodb-scrub-log.test b/mysql-test/suite/encryption/t/innodb-scrub-log.test index 36ecc88b99a..e8149b6b3ff 100644 --- a/mysql-test/suite/encryption/t/innodb-scrub-log.test +++ b/mysql-test/suite/encryption/t/innodb-scrub-log.test @@ -1,7 +1,7 @@ --source include/have_innodb.inc # -# MDEV-11705: InnoDB: Failing assertion: (&log_sys->mutex)->is_owned() if server started with innodb-scrub-log +# MDEV-11705: InnoDB: Failing assertion: (&log_sys.mutex)->is_owned() if server started with innodb-scrub-log # create table t1(a int not null primary key auto_increment, diff --git a/storage/innobase/btr/btr0bulk.cc b/storage/innobase/btr/btr0bulk.cc index 63194060d8a..f8c7271953b 100644 --- a/storage/innobase/btr/btr0bulk.cc +++ b/storage/innobase/btr/btr0bulk.cc @@ -725,7 +725,7 @@ BtrBulk::pageCommit( void BtrBulk::logFreeCheck() { - if (log_sys->check_flush_or_checkpoint) { + if (log_sys.check_flush_or_checkpoint) { release(); log_free_check(); diff --git a/storage/innobase/buf/buf0buf.cc b/storage/innobase/buf/buf0buf.cc index 6e1c5c1ff5b..12d7f275e1e 100644 --- a/storage/innobase/buf/buf0buf.cc +++ b/storage/innobase/buf/buf0buf.cc @@ -1191,11 +1191,11 @@ buf_madvise_do_dump() buf_pool_t* buf_pool; buf_chunk_t* chunk; - /* mirrors allocation in log_sys_init() */ - if (log_sys->buf) { - ret+= madvise(log_sys->first_in_use - ? log_sys->buf - : log_sys->buf - srv_log_buffer_size, + /* mirrors allocation in log_t::create() */ + if (log_sys.buf) { + ret+= madvise(log_sys.first_in_use + ? log_sys.buf + : log_sys.buf - srv_log_buffer_size, srv_log_buffer_size * 2, MADV_DODUMP); } diff --git a/storage/innobase/buf/buf0flu.cc b/storage/innobase/buf/buf0flu.cc index 6e33aab9005..2f9cb89e2b6 100644 --- a/storage/innobase/buf/buf0flu.cc +++ b/storage/innobase/buf/buf0flu.cc @@ -2439,7 +2439,7 @@ page_cleaner_flush_pages_recommendation( cur_lsn = log_get_lsn_nowait(); - /* log_get_lsn_nowait tries to get log_sys->mutex with + /* log_get_lsn_nowait tries to get log_sys.mutex with mutex_enter_nowait, if this does not succeed function returns 0, do not use that value to update stats. 
*/ if (cur_lsn == 0) { diff --git a/storage/innobase/fil/fil0fil.cc b/storage/innobase/fil/fil0fil.cc index b76a54d3886..9c1d52e8257 100644 --- a/storage/innobase/fil/fil0fil.cc +++ b/storage/innobase/fil/fil0fil.cc @@ -3122,7 +3122,7 @@ func_exit: log_mutex_enter(); } - /* log_sys->mutex is above fil_system.mutex in the latching order */ + /* log_sys.mutex is above fil_system.mutex in the latching order */ ut_ad(log_mutex_own()); mutex_enter(&fil_system.mutex); ut_ad(space->name == old_space_name); @@ -5120,12 +5120,12 @@ fil_names_dirty( { ut_ad(log_mutex_own()); ut_ad(recv_recovery_is_on()); - ut_ad(log_sys->lsn != 0); + ut_ad(log_sys.lsn != 0); ut_ad(space->max_lsn == 0); ut_d(fil_space_validate_for_mtr_commit(space)); UT_LIST_ADD_LAST(fil_system.named_spaces, space); - space->max_lsn = log_sys->lsn; + space->max_lsn = log_sys.lsn; } /** Write MLOG_FILE_NAME records when a non-predefined persistent @@ -5140,7 +5140,7 @@ fil_names_dirty_and_write( { ut_ad(log_mutex_own()); ut_d(fil_space_validate_for_mtr_commit(space)); - ut_ad(space->max_lsn == log_sys->lsn); + ut_ad(space->max_lsn == log_sys.lsn); UT_LIST_ADD_LAST(fil_system.named_spaces, space); fil_names_write(space, mtr); @@ -5177,8 +5177,8 @@ fil_names_clear( ut_ad(log_mutex_own()); - if (log_sys->append_on_checkpoint) { - mtr_write_log(log_sys->append_on_checkpoint); + if (log_sys.append_on_checkpoint) { + mtr_write_log(log_sys.append_on_checkpoint); do_write = true; } diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc index 1989802bd15..1a433bf6112 100644 --- a/storage/innobase/handler/ha_innodb.cc +++ b/storage/innobase/handler/ha_innodb.cc @@ -18562,16 +18562,16 @@ checkpoint_now_set( check function */ { if (*(my_bool*) save) { - while (log_sys->last_checkpoint_lsn + while (log_sys.last_checkpoint_lsn + SIZE_OF_MLOG_CHECKPOINT - + (log_sys->append_on_checkpoint != NULL - ? log_sys->append_on_checkpoint->size() : 0) - < log_sys->lsn) { + + (log_sys.append_on_checkpoint != NULL + ? log_sys.append_on_checkpoint->size() : 0) + < log_sys.lsn) { log_make_checkpoint_at(LSN_MAX, TRUE); fil_flush_file_spaces(FIL_TYPE_LOG); } - dberr_t err = fil_write_flushed_lsn(log_sys->lsn); + dberr_t err = fil_write_flushed_lsn(log_sys.lsn); if (err != DB_SUCCESS) { ib::warn() << "Checkpoint set failed " << err; diff --git a/storage/innobase/include/fil0fil.h b/storage/innobase/include/fil0fil.h index 2faee1363c3..8f4e9d10fd9 100644 --- a/storage/innobase/include/fil0fil.h +++ b/storage/innobase/include/fil0fil.h @@ -82,7 +82,7 @@ struct fil_space_t { /*!< LSN of the most recent fil_names_write_if_was_clean(). Reset to 0 by fil_names_clear(). - Protected by log_sys->mutex. + Protected by log_sys.mutex. If and only if this is nonzero, the tablespace will be in named_spaces. */ bool stop_ios;/*!< true if we want to rename the @@ -286,7 +286,7 @@ struct fil_space_t { struct fil_node_t { /** tablespace containing this file */ fil_space_t* space; - /** file name; protected by fil_system.mutex and log_sys->mutex. */ + /** file name; protected by fil_system.mutex and log_sys.mutex. */ char* name; /** file handle (valid if is_open) */ pfs_os_file_t handle; @@ -628,7 +628,7 @@ public: for which a MLOG_FILE_NAME record has been written since the latest redo log checkpoint. - Protected only by log_sys->mutex. */ + Protected only by log_sys.mutex. 
*/ UT_LIST_BASE_NODE_T(fil_space_t) rotation_list; /*!< list of all file spaces needing key rotation.*/ @@ -1326,8 +1326,8 @@ fil_names_write_if_was_clean( } const bool was_clean = space->max_lsn == 0; - ut_ad(space->max_lsn <= log_sys->lsn); - space->max_lsn = log_sys->lsn; + ut_ad(space->max_lsn <= log_sys.lsn); + space->max_lsn = log_sys.lsn; if (was_clean) { fil_names_dirty_and_write(space, mtr); diff --git a/storage/innobase/include/log0log.h b/storage/innobase/include/log0log.h index 093044aa4fc..9555fe81025 100644 --- a/storage/innobase/include/log0log.h +++ b/storage/innobase/include/log0log.h @@ -56,7 +56,7 @@ step which modifies the database, is started */ typedef ulint (*log_checksum_func_t)(const byte* log_block); /** Pointer to the log checksum calculation function. Protected with -log_sys->mutex. */ +log_sys.mutex. */ extern log_checksum_func_t log_checksum_algorithm_ptr; /** Append a string to the log. @@ -136,7 +136,7 @@ log_get_flush_lsn(void); /*=============*/ /**************************************************************** Gets the log group capacity. It is OK to read the value without -holding log_sys->mutex because it is constant. +holding log_sys.mutex because it is constant. @return log group capacity */ UNIV_INLINE lsn_t @@ -150,9 +150,6 @@ UNIV_INLINE lsn_t log_get_max_modified_age_async(void); /*================================*/ -/** Initializes the redo logging subsystem. */ -void -log_sys_init(); /** Initialize the redo log. @param[in] n_files number of files */ @@ -233,7 +230,7 @@ shutdown. This function also writes all log in log files to the log archive. */ void logs_empty_and_mark_files_at_shutdown(void); /*=======================================*/ -/** Read a log group header page to log_sys->checkpoint_buf. +/** Read a log group header page to log_sys.checkpoint_buf. @param[in] group log group @param[in] header 0 or LOG_CHEKCPOINT_1 or LOG_CHECKPOINT2 */ void @@ -406,9 +403,6 @@ Closes all log groups. */ void log_group_close_all(void); /*=====================*/ -/** Shut down the redo log subsystem. */ -void -log_shutdown(); /** Whether to generate and require checksums on the redo log pages */ extern my_bool innodb_log_checksums; @@ -443,7 +437,7 @@ extern my_bool innodb_log_checksums; from this offset in this log block, if value not 0 */ #define LOG_BLOCK_CHECKPOINT_NO 8 /* 4 lower bytes of the value of - log_sys->next_checkpoint_no when the + log_sys.next_checkpoint_no when the log block was last written to: if the block has not yet been written full, this value is only updated before a @@ -544,9 +538,9 @@ typedef ib_mutex_t FlushOrderMutex; /** Log group consists of a number of log files, each of the same size; a log group is implemented as a space in the sense of the module fil0fil. -Currently, this is only protected by log_sys->mutex. However, in the case +Currently, this is only protected by log_sys.mutex. However, in the case of log_write_up_to(), we will access some members only with the protection -of log_sys->write_mutex, which should affect nothing for now. */ +of log_sys.write_mutex, which should affect nothing for now. 
*/ struct log_group_t{ /** number of files in the group */ ulint n_files; @@ -588,25 +582,22 @@ struct log_group_t{ /** Redo log buffer */ struct log_t{ - char pad1[CACHE_LINE_SIZE]; - /*!< Padding to prevent other memory - update hotspots from residing on the - same memory cache line */ + MY_ALIGNED(CACHE_LINE_SIZE) lsn_t lsn; /*!< log sequence number */ ulong buf_free; /*!< first free offset within the log buffer in use */ - char pad2[CACHE_LINE_SIZE];/*!< Padding */ + MY_ALIGNED(CACHE_LINE_SIZE) LogSysMutex mutex; /*!< mutex protecting the log */ - char pad3[CACHE_LINE_SIZE]; /*!< Padding */ + MY_ALIGNED(CACHE_LINE_SIZE) LogSysMutex write_mutex; /*!< mutex protecting writing to log file and accessing to log_group_t */ - char pad4[CACHE_LINE_SIZE];/*!< Padding */ + MY_ALIGNED(CACHE_LINE_SIZE) FlushOrderMutex log_flush_order_mutex;/*!< mutex to serialize access to the flush list when we are putting dirty blocks in the list. The idea behind this mutex is to be able - to release log_sys->mutex during + to release log_sys.mutex during mtr_commit and still ensure that insertions in the flush_list happen in the LSN order. */ @@ -636,7 +627,7 @@ struct log_t{ peeked at by log_free_check(), which does not reserve the log mutex */ /** the redo log */ - log_group_t log; + log_group_t log; /** The fields involved in the log buffer flush @{ */ @@ -707,7 +698,7 @@ struct log_t{ /*!< extra redo log records to write during a checkpoint, or NULL if none. The pointer is protected by - log_sys->mutex, and the data must + log_sys.mutex, and the data must remain constant as long as this pointer is not NULL. */ ulint n_pending_checkpoint_writes; @@ -717,62 +708,79 @@ struct log_t{ checkpoint write is running; a thread should wait for this without owning the log mutex */ - byte* checkpoint_buf_ptr;/* unaligned checkpoint header */ - byte* checkpoint_buf; /*!< checkpoint header is read to this - buffer */ + + /** buffer for checkpoint header */ + MY_ALIGNED(OS_FILE_LOG_BLOCK_SIZE) + byte checkpoint_buf[OS_FILE_LOG_BLOCK_SIZE]; /* @} */ - /** @return whether the redo log is encrypted */ - bool is_encrypted() const - { - return(log.is_encrypted()); - } +private: + bool m_initialised; +public: + /** + Constructor. + + Some members may require late initialisation, thus we just mark object as + uninitialised. Real initialisation happens in create(). + */ + log_t(): m_initialised(false) {} + + /** @return whether the redo log is encrypted */ + bool is_encrypted() const { return(log.is_encrypted()); } + + bool is_initialised() { return m_initialised; } + + /** Initialise the redo log subsystem. */ + void create(); + + /** Shut down the redo log subsystem. */ + void close(); }; /** Redo log system */ -extern log_t* log_sys; +extern log_t log_sys; /** Test if flush order mutex is owned. */ #define log_flush_order_mutex_own() \ - mutex_own(&log_sys->log_flush_order_mutex) + mutex_own(&log_sys.log_flush_order_mutex) /** Acquire the flush order mutex. */ #define log_flush_order_mutex_enter() do { \ - mutex_enter(&log_sys->log_flush_order_mutex); \ + mutex_enter(&log_sys.log_flush_order_mutex); \ } while (0) /** Release the flush order mutex. */ # define log_flush_order_mutex_exit() do { \ - mutex_exit(&log_sys->log_flush_order_mutex); \ + mutex_exit(&log_sys.log_flush_order_mutex); \ } while (0) /** Test if log sys mutex is owned. */ -#define log_mutex_own() mutex_own(&log_sys->mutex) +#define log_mutex_own() mutex_own(&log_sys.mutex) /** Test if log sys write mutex is owned. 
*/ -#define log_write_mutex_own() mutex_own(&log_sys->write_mutex) +#define log_write_mutex_own() mutex_own(&log_sys.write_mutex) /** Acquire the log sys mutex. */ -#define log_mutex_enter() mutex_enter(&log_sys->mutex) +#define log_mutex_enter() mutex_enter(&log_sys.mutex) /** Acquire the log sys write mutex. */ -#define log_write_mutex_enter() mutex_enter(&log_sys->write_mutex) +#define log_write_mutex_enter() mutex_enter(&log_sys.write_mutex) /** Acquire all the log sys mutexes. */ #define log_mutex_enter_all() do { \ - mutex_enter(&log_sys->write_mutex); \ - mutex_enter(&log_sys->mutex); \ + mutex_enter(&log_sys.write_mutex); \ + mutex_enter(&log_sys.mutex); \ } while (0) /** Release the log sys mutex. */ -#define log_mutex_exit() mutex_exit(&log_sys->mutex) +#define log_mutex_exit() mutex_exit(&log_sys.mutex) /** Release the log sys write mutex.*/ -#define log_write_mutex_exit() mutex_exit(&log_sys->write_mutex) +#define log_write_mutex_exit() mutex_exit(&log_sys.write_mutex) /** Release all the log sys mutexes. */ #define log_mutex_exit_all() do { \ - mutex_exit(&log_sys->mutex); \ - mutex_exit(&log_sys->write_mutex); \ + mutex_exit(&log_sys.mutex); \ + mutex_exit(&log_sys.write_mutex); \ } while (0) /** Calculate the offset of an lsn within a log group. diff --git a/storage/innobase/include/log0log.ic b/storage/innobase/include/log0log.ic index b491a06eb6a..236ef5594e3 100644 --- a/storage/innobase/include/log0log.ic +++ b/storage/innobase/include/log0log.ic @@ -330,15 +330,15 @@ log_reserve_and_write_fast( len - SIZE_OF_MLOG_CHECKPOINT] ? 0 : 1 - + mach_get_compressed_size(log_sys->lsn >> 32) - + mach_get_compressed_size(log_sys->lsn & 0xFFFFFFFFUL); + + mach_get_compressed_size(log_sys.lsn >> 32) + + mach_get_compressed_size(log_sys.lsn & 0xFFFFFFFFUL); #endif /* UNIV_LOG_LSN_DEBUG */ const ulint data_len = len #ifdef UNIV_LOG_LSN_DEBUG + lsn_len #endif /* UNIV_LOG_LSN_DEBUG */ - + log_sys->buf_free % OS_FILE_LOG_BLOCK_SIZE; + + log_sys.buf_free % OS_FILE_LOG_BLOCK_SIZE; if (data_len >= OS_FILE_LOG_BLOCK_SIZE - LOG_BLOCK_TRL_SIZE) { @@ -348,44 +348,44 @@ log_reserve_and_write_fast( return(0); } - *start_lsn = log_sys->lsn; + *start_lsn = log_sys.lsn; #ifdef UNIV_LOG_LSN_DEBUG if (lsn_len) { /* Write the LSN pseudo-record. */ - byte* b = &log_sys->buf[log_sys->buf_free]; + byte* b = &log_sys.buf[log_sys.buf_free]; *b++ = MLOG_LSN | (MLOG_SINGLE_REC_FLAG & *(const byte*) str); /* Write the LSN in two parts, as a pseudo page number and space id. 
*/ - b += mach_write_compressed(b, log_sys->lsn >> 32); - b += mach_write_compressed(b, log_sys->lsn & 0xFFFFFFFFUL); - ut_a(b - lsn_len == &log_sys->buf[log_sys->buf_free]); + b += mach_write_compressed(b, log_sys.lsn >> 32); + b += mach_write_compressed(b, log_sys.lsn & 0xFFFFFFFFUL); + ut_a(b - lsn_len == &log_sys.buf[log_sys.buf_free]); ::memcpy(b, str, len); len += lsn_len; } else #endif /* UNIV_LOG_LSN_DEBUG */ - memcpy(log_sys->buf + log_sys->buf_free, str, len); + memcpy(log_sys.buf + log_sys.buf_free, str, len); log_block_set_data_len( reinterpret_cast(ut_align_down( - log_sys->buf + log_sys->buf_free, + log_sys.buf + log_sys.buf_free, OS_FILE_LOG_BLOCK_SIZE)), data_len); - log_sys->buf_free += ulong(len); + log_sys.buf_free += ulong(len); - ut_ad(log_sys->buf_free <= srv_log_buffer_size); + ut_ad(log_sys.buf_free <= srv_log_buffer_size); - log_sys->lsn += len; + log_sys.lsn += len; MONITOR_SET(MONITOR_LSN_CHECKPOINT_AGE, - log_sys->lsn - log_sys->last_checkpoint_lsn); + log_sys.lsn - log_sys.last_checkpoint_lsn); - return(log_sys->lsn); + return(log_sys.lsn); } /************************************************************//** @@ -400,7 +400,7 @@ log_get_lsn(void) log_mutex_enter(); - lsn = log_sys->lsn; + lsn = log_sys.lsn; log_mutex_exit(); @@ -418,7 +418,7 @@ log_get_flush_lsn(void) log_mutex_enter(); - lsn = log_sys->flushed_to_disk_lsn; + lsn = log_sys.flushed_to_disk_lsn; log_mutex_exit(); @@ -435,11 +435,11 @@ log_get_lsn_nowait(void) { lsn_t lsn=0; - if (!mutex_enter_nowait(&(log_sys->mutex))) { + if (!mutex_enter_nowait(&(log_sys.mutex))) { - lsn = log_sys->lsn; + lsn = log_sys.lsn; - mutex_exit(&(log_sys->mutex)); + mutex_exit(&(log_sys.mutex)); } return(lsn); @@ -447,14 +447,14 @@ log_get_lsn_nowait(void) /**************************************************************** Gets the log group capacity. It is OK to read the value without -holding log_sys->mutex because it is constant. +holding log_sys.mutex because it is constant. @return log group capacity */ UNIV_INLINE lsn_t log_get_capacity(void) /*==================*/ { - return(log_sys->log_group_capacity); + return(log_sys.log_group_capacity); } /**************************************************************** @@ -466,7 +466,7 @@ lsn_t log_get_max_modified_age_async(void) /*================================*/ { - return(log_sys->max_modified_age_async); + return(log_sys.max_modified_age_async); } /***********************************************************************//** @@ -498,7 +498,7 @@ log_free_check(void) sync_allowed_latches(latches, latches + UT_ARR_SIZE(latches)))); - if (log_sys->check_flush_or_checkpoint) { + if (log_sys.check_flush_or_checkpoint) { log_check_margins(); } diff --git a/storage/innobase/include/log0recv.h b/storage/innobase/include/log0recv.h index 19e13fcdcc6..50d581e9101 100644 --- a/storage/innobase/include/log0recv.h +++ b/storage/innobase/include/log0recv.h @@ -331,7 +331,7 @@ extern bool recv_no_ibuf_operations; extern bool recv_needed_recovery; #ifdef UNIV_DEBUG /** TRUE if writing to the redo log (mtr_commit) is forbidden. -Protected by log_sys->mutex. */ +Protected by log_sys.mutex. */ extern bool recv_no_log_write; #endif /* UNIV_DEBUG */ diff --git a/storage/innobase/include/srv0srv.h b/storage/innobase/include/srv0srv.h index 29492aa7bb6..17f84c69fbb 100644 --- a/storage/innobase/include/srv0srv.h +++ b/storage/innobase/include/srv0srv.h @@ -80,7 +80,7 @@ struct srv_stats_t lsn_ctr_1_t os_log_written; /** Number of writes being done to the log files. 
- Protected by log_sys->write_mutex. */ + Protected by log_sys.write_mutex. */ ulint_ctr_1_t os_log_pending_writes; /** We increase this counter, when we don't have enough diff --git a/storage/innobase/log/log0crypt.cc b/storage/innobase/log/log0crypt.cc index 9cd06bc0c6f..366c9672e45 100644 --- a/storage/innobase/log/log0crypt.cc +++ b/storage/innobase/log/log0crypt.cc @@ -219,7 +219,7 @@ bool log_crypt_init() { ut_ad(log_mutex_own()); - ut_ad(log_sys->is_encrypted()); + ut_ad(log_sys.is_encrypted()); info.key_version = encryption_key_get_latest_version( LOG_DEFAULT_ENCRYPTION_KEY); diff --git a/storage/innobase/log/log0log.cc b/storage/innobase/log/log0log.cc index 8b36f901e07..5db40967ab4 100644 --- a/storage/innobase/log/log0log.cc +++ b/storage/innobase/log/log0log.cc @@ -81,7 +81,7 @@ reduce the size of the log. */ /** Redo log system */ -log_t* log_sys = NULL; +log_t log_sys; /** Whether to generate and require checksums on the redo log pages */ my_bool innodb_log_checksums; @@ -142,7 +142,7 @@ log_io_complete_checkpoint(void); /*============================*/ /****************************************************************//** -Returns the oldest modified block lsn in the pool, or log_sys->lsn if none +Returns the oldest modified block lsn in the pool, or log_sys.lsn if none exists. @return LSN of oldest modification */ static @@ -158,7 +158,7 @@ log_buf_pool_get_oldest_modification(void) if (!lsn) { - lsn = log_sys->lsn; + lsn = log_sys.lsn; } return(lsn); @@ -172,7 +172,7 @@ void log_buffer_extend(ulong len) log_mutex_enter_all(); - while (log_sys->is_extending) { + while (log_sys.is_extending) { /* Another thread is trying to extend already. Needs to wait for. */ log_mutex_exit_all(); @@ -198,11 +198,11 @@ void log_buffer_extend(ulong len) << srv_log_buffer_size << " / 2). Trying to extend it."; } - log_sys->is_extending = true; + log_sys.is_extending = true; - while (ut_calc_align_down(log_sys->buf_free, + while (ut_calc_align_down(log_sys.buf_free, OS_FILE_LOG_BLOCK_SIZE) - != ut_calc_align_down(log_sys->buf_next_to_write, + != ut_calc_align_down(log_sys.buf_next_to_write, OS_FILE_LOG_BLOCK_SIZE)) { /* Buffer might have >1 blocks to write still. 
*/ log_mutex_exit_all(); @@ -213,39 +213,39 @@ void log_buffer_extend(ulong len) } ulong move_start = ut_calc_align_down( - log_sys->buf_free, + log_sys.buf_free, OS_FILE_LOG_BLOCK_SIZE); - ulong move_end = log_sys->buf_free; + ulong move_end = log_sys.buf_free; /* store the last log block in buffer */ - ut_memcpy(tmp_buf, log_sys->buf + move_start, + ut_memcpy(tmp_buf, log_sys.buf + move_start, move_end - move_start); - log_sys->buf_free -= move_start; - log_sys->buf_next_to_write -= move_start; + log_sys.buf_free -= move_start; + log_sys.buf_next_to_write -= move_start; /* free previous after getting the right address */ - if (!log_sys->first_in_use) { - log_sys->buf -= srv_log_buffer_size; + if (!log_sys.first_in_use) { + log_sys.buf -= srv_log_buffer_size; } - ut_free_dodump(log_sys->buf, srv_log_buffer_size * 2); + ut_free_dodump(log_sys.buf, srv_log_buffer_size * 2); /* reallocate log buffer */ srv_log_buffer_size = len; - log_sys->buf = static_cast( + log_sys.buf = static_cast( ut_malloc_dontdump(srv_log_buffer_size * 2)); - log_sys->first_in_use = true; + log_sys.first_in_use = true; - log_sys->max_buf_free = srv_log_buffer_size / LOG_BUF_FLUSH_RATIO + log_sys.max_buf_free = srv_log_buffer_size / LOG_BUF_FLUSH_RATIO - LOG_BUF_FLUSH_MARGIN; /* restore the last log block */ - ut_memcpy(log_sys->buf, tmp_buf, move_end - move_start); + ut_memcpy(log_sys.buf, tmp_buf, move_end - move_start); - ut_ad(log_sys->is_extending); - log_sys->is_extending = false; + ut_ad(log_sys.is_extending); + log_sys.is_extending = false; log_mutex_exit_all(); @@ -269,7 +269,7 @@ log_calculate_actual_len( - (LOG_BLOCK_HDR_SIZE + LOG_BLOCK_TRL_SIZE); /* actual data length in last block already written */ - ulint extra_len = (log_sys->buf_free % OS_FILE_LOG_BLOCK_SIZE); + ulint extra_len = (log_sys.buf_free % OS_FILE_LOG_BLOCK_SIZE); ut_ad(extra_len >= LOG_BLOCK_HDR_SIZE); extra_len -= LOG_BLOCK_HDR_SIZE; @@ -294,7 +294,7 @@ log_margin_checkpoint_age( ut_ad(log_mutex_own()); - if (margin > log_sys->log_group_capacity) { + if (margin > log_sys.log_group_capacity) { /* return with warning output to avoid deadlock */ if (!log_has_printed_chkp_margine_warning || difftime(time(NULL), @@ -306,7 +306,7 @@ log_margin_checkpoint_age( " small for the single transaction log (size=" << len << "). So, the last checkpoint age" " might exceed the log group capacity " - << log_sys->log_group_capacity << "."; + << log_sys.log_group_capacity << "."; } return; @@ -315,20 +315,20 @@ log_margin_checkpoint_age( /* Our margin check should ensure that we never reach this condition. Try to do checkpoint once. We cannot keep waiting here as it might result in hang in case the current mtr has latch on oldest lsn */ - if (log_sys->lsn - log_sys->last_checkpoint_lsn + margin - > log_sys->log_group_capacity) { + if (log_sys.lsn - log_sys.last_checkpoint_lsn + margin + > log_sys.log_group_capacity) { /* The log write of 'len' might overwrite the transaction log after the last checkpoint. Makes checkpoint. 
*/ bool flushed_enough = false; - if (log_sys->lsn - log_buf_pool_get_oldest_modification() + if (log_sys.lsn - log_buf_pool_get_oldest_modification() + margin - <= log_sys->log_group_capacity) { + <= log_sys.log_group_capacity) { flushed_enough = true; } - log_sys->check_flush_or_checkpoint = true; + log_sys.check_flush_or_checkpoint = true; log_mutex_exit(); DEBUG_SYNC_C("margin_checkpoint_age_rescue"); @@ -359,7 +359,7 @@ log_reserve_and_open( loop: ut_ad(log_mutex_own()); - if (log_sys->is_extending) { + if (log_sys.is_extending) { log_mutex_exit(); /* Log buffer size is extending. Writing up to the next block @@ -379,7 +379,7 @@ loop: len_upper_limit = LOG_BUF_WRITE_MARGIN + srv_log_write_ahead_size + (5 * len) / 4; - if (log_sys->buf_free + len_upper_limit > srv_log_buffer_size) { + if (log_sys.buf_free + len_upper_limit > srv_log_buffer_size) { log_mutex_exit(); DEBUG_SYNC_C("log_buf_size_exceeded"); @@ -395,7 +395,7 @@ loop: goto loop; } - return(log_sys->lsn); + return(log_sys.lsn); } /************************************************************//** @@ -407,7 +407,6 @@ log_write_low( const byte* str, /*!< in: string */ ulint str_len) /*!< in: string length */ { - log_t* log = log_sys; ulint len; ulint data_len; byte* log_block; @@ -416,7 +415,7 @@ log_write_low( part_loop: /* Calculate a part length */ - data_len = (log->buf_free % OS_FILE_LOG_BLOCK_SIZE) + str_len; + data_len = (log_sys.buf_free % OS_FILE_LOG_BLOCK_SIZE) + str_len; if (data_len <= OS_FILE_LOG_BLOCK_SIZE - LOG_BLOCK_TRL_SIZE) { @@ -427,18 +426,18 @@ part_loop: data_len = OS_FILE_LOG_BLOCK_SIZE - LOG_BLOCK_TRL_SIZE; len = OS_FILE_LOG_BLOCK_SIZE - - (log->buf_free % OS_FILE_LOG_BLOCK_SIZE) + - (log_sys.buf_free % OS_FILE_LOG_BLOCK_SIZE) - LOG_BLOCK_TRL_SIZE; } - ut_memcpy(log->buf + log->buf_free, str, len); + memcpy(log_sys.buf + log_sys.buf_free, str, len); str_len -= len; str = str + len; log_block = static_cast( - ut_align_down( - log->buf + log->buf_free, OS_FILE_LOG_BLOCK_SIZE)); + ut_align_down(log_sys.buf + log_sys.buf_free, + OS_FILE_LOG_BLOCK_SIZE)); log_block_set_data_len(log_block, data_len); @@ -446,20 +445,21 @@ part_loop: /* This block became full */ log_block_set_data_len(log_block, OS_FILE_LOG_BLOCK_SIZE); log_block_set_checkpoint_no(log_block, - log_sys->next_checkpoint_no); + log_sys.next_checkpoint_no); len += LOG_BLOCK_HDR_SIZE + LOG_BLOCK_TRL_SIZE; - log->lsn += len; + log_sys.lsn += len; /* Initialize the next block header */ - log_block_init(log_block + OS_FILE_LOG_BLOCK_SIZE, log->lsn); + log_block_init(log_block + OS_FILE_LOG_BLOCK_SIZE, + log_sys.lsn); } else { - log->lsn += len; + log_sys.lsn += len; } - log->buf_free += len; + log_sys.buf_free += ulong(len); - ut_ad(log->buf_free <= srv_log_buffer_size); + ut_ad(log_sys.buf_free <= srv_log_buffer_size); if (str_len > 0) { goto part_loop; @@ -479,16 +479,15 @@ log_close(void) ulint first_rec_group; lsn_t oldest_lsn; lsn_t lsn; - log_t* log = log_sys; lsn_t checkpoint_age; ut_ad(log_mutex_own()); - lsn = log->lsn; + lsn = log_sys.lsn; log_block = static_cast( - ut_align_down( - log->buf + log->buf_free, OS_FILE_LOG_BLOCK_SIZE)); + ut_align_down(log_sys.buf + log_sys.buf_free, + OS_FILE_LOG_BLOCK_SIZE)); first_rec_group = log_block_get_first_rec_group(log_block); @@ -501,14 +500,13 @@ log_close(void) log_block, log_block_get_data_len(log_block)); } - if (log->buf_free > log->max_buf_free) { - - log->check_flush_or_checkpoint = true; + if (log_sys.buf_free > log_sys.max_buf_free) { + log_sys.check_flush_or_checkpoint = true; } - 
checkpoint_age = lsn - log->last_checkpoint_lsn; + checkpoint_age = lsn - log_sys.last_checkpoint_lsn; - if (checkpoint_age >= log->log_group_capacity) { + if (checkpoint_age >= log_sys.log_group_capacity) { DBUG_EXECUTE_IF( "print_all_chkp_warnings", log_has_printed_chkp_warning = false;); @@ -521,23 +519,22 @@ log_close(void) ib::error() << "The age of the last checkpoint is " << checkpoint_age << ", which exceeds the log" - " group capacity " << log->log_group_capacity + " group capacity " + << log_sys.log_group_capacity << "."; } } - if (checkpoint_age <= log->max_modified_age_sync) { - + if (checkpoint_age <= log_sys.max_modified_age_sync) { goto function_exit; } oldest_lsn = buf_pool_get_oldest_modification(); if (!oldest_lsn - || lsn - oldest_lsn > log->max_modified_age_sync - || checkpoint_age > log->max_checkpoint_age_async) { - - log->check_flush_or_checkpoint = true; + || lsn - oldest_lsn > log_sys.max_modified_age_sync + || checkpoint_age > log_sys.max_checkpoint_age_async) { + log_sys.check_flush_or_checkpoint = true; } function_exit: @@ -685,90 +682,87 @@ log_set_capacity(ulonglong file_size) log_mutex_enter(); - log_sys->log_group_capacity = smallest_capacity; + log_sys.log_group_capacity = smallest_capacity; - log_sys->max_modified_age_async = margin + log_sys.max_modified_age_async = margin - margin / LOG_POOL_PREFLUSH_RATIO_ASYNC; - log_sys->max_modified_age_sync = margin + log_sys.max_modified_age_sync = margin - margin / LOG_POOL_PREFLUSH_RATIO_SYNC; - log_sys->max_checkpoint_age_async = margin - margin + log_sys.max_checkpoint_age_async = margin - margin / LOG_POOL_CHECKPOINT_RATIO_ASYNC; - log_sys->max_checkpoint_age = margin; + log_sys.max_checkpoint_age = margin; log_mutex_exit(); return(true); } -/** Initializes the redo logging subsystem. */ -void -log_sys_init() +/** Initialize the redo log subsystem. 
*/ +void log_t::create() { - log_sys = static_cast(ut_zalloc_nokey(sizeof(log_t))); + ut_ad(this == &log_sys); + ut_ad(!is_initialised()); + m_initialised= true; - mutex_create(LATCH_ID_LOG_SYS, &log_sys->mutex); - mutex_create(LATCH_ID_LOG_WRITE, &log_sys->write_mutex); + mutex_create(LATCH_ID_LOG_SYS, &mutex); + mutex_create(LATCH_ID_LOG_WRITE, &write_mutex); + mutex_create(LATCH_ID_LOG_FLUSH_ORDER, &log_flush_order_mutex); - mutex_create(LATCH_ID_LOG_FLUSH_ORDER, &log_sys->log_flush_order_mutex); + /* Start the lsn from one log block from zero: this way every + log record has a non-zero start lsn, a fact which we will use */ - /* Start the lsn from one log block from zero: this way every - log record has a start lsn != zero, a fact which we will use */ + lsn= LOG_START_LSN; - log_sys->lsn = LOG_START_LSN; + ut_ad(srv_log_buffer_size >= 16 * OS_FILE_LOG_BLOCK_SIZE); + ut_ad(srv_log_buffer_size >= 4U << srv_page_size_shift); - ut_ad(srv_log_buffer_size >= 16 * OS_FILE_LOG_BLOCK_SIZE); - ut_ad(srv_log_buffer_size >= 4U << srv_page_size_shift); + buf= static_cast(ut_malloc_dontdump(srv_log_buffer_size * 2)); - log_sys->buf = static_cast( - ut_malloc_dontdump(srv_log_buffer_size * 2)); + first_in_use= true; - log_sys->first_in_use = true; + max_buf_free= srv_log_buffer_size / LOG_BUF_FLUSH_RATIO - + LOG_BUF_FLUSH_MARGIN; + check_flush_or_checkpoint= true; - log_sys->max_buf_free = srv_log_buffer_size / LOG_BUF_FLUSH_RATIO - - LOG_BUF_FLUSH_MARGIN; - log_sys->check_flush_or_checkpoint = true; + n_log_ios_old= n_log_ios; + last_printout_time= time(NULL); - log_sys->n_log_ios_old = log_sys->n_log_ios; - log_sys->last_printout_time = time(NULL); - /*----------------------------*/ + buf_next_to_write= 0; + is_extending= false; + write_lsn= lsn; + flushed_to_disk_lsn= 0; + n_pending_flushes= 0; + flush_event = os_event_create("log_flush_event"); + os_event_set(flush_event); + n_log_ios= 0; + n_log_ios_old= 0; + log_group_capacity= 0; + max_modified_age_async= 0; + max_modified_age_sync= 0; + max_checkpoint_age_async= 0; + max_checkpoint_age= 0; + next_checkpoint_no= 0; + next_checkpoint_lsn= 0; + append_on_checkpoint= NULL; + n_pending_checkpoint_writes= 0; - log_sys->write_lsn = log_sys->lsn; + last_checkpoint_lsn= lsn; + rw_lock_create(checkpoint_lock_key, &checkpoint_lock, SYNC_NO_ORDER_CHECK); - log_sys->flush_event = os_event_create(0); + log_block_init(buf, lsn); + log_block_set_first_rec_group(buf, LOG_BLOCK_HDR_SIZE); - os_event_set(log_sys->flush_event); + buf_free= LOG_BLOCK_HDR_SIZE; + lsn= LOG_START_LSN + LOG_BLOCK_HDR_SIZE; - /*----------------------------*/ + MONITOR_SET(MONITOR_LSN_CHECKPOINT_AGE, lsn - last_checkpoint_lsn); - log_sys->last_checkpoint_lsn = log_sys->lsn; - - rw_lock_create( - checkpoint_lock_key, &log_sys->checkpoint_lock, - SYNC_NO_ORDER_CHECK); - - log_sys->checkpoint_buf_ptr = static_cast( - ut_zalloc_nokey(2 * OS_FILE_LOG_BLOCK_SIZE)); - - log_sys->checkpoint_buf = static_cast( - ut_align(log_sys->checkpoint_buf_ptr, OS_FILE_LOG_BLOCK_SIZE)); - - /*----------------------------*/ - - log_block_init(log_sys->buf, log_sys->lsn); - log_block_set_first_rec_group(log_sys->buf, LOG_BLOCK_HDR_SIZE); - - log_sys->buf_free = LOG_BLOCK_HDR_SIZE; - log_sys->lsn = LOG_START_LSN + LOG_BLOCK_HDR_SIZE; // TODO(minliz): ensure various LOG_START_LSN? 
- - MONITOR_SET(MONITOR_LSN_CHECKPOINT_AGE, - log_sys->lsn - log_sys->last_checkpoint_lsn); - - log_scrub_thread_active = !srv_read_only_mode && srv_scrub_log; - if (log_scrub_thread_active) { - log_scrub_event = os_event_create("log_scrub_event"); - os_thread_create(log_scrub_thread, NULL, NULL); - } + log_scrub_thread_active= !srv_read_only_mode && srv_scrub_log; + if (log_scrub_thread_active) { + log_scrub_event= os_event_create("log_scrub_event"); + os_thread_create(log_scrub_thread, NULL, NULL); + } } /** Initialize the redo log. @@ -777,7 +771,7 @@ void log_init(ulint n_files) { ulint i; - log_group_t* group = &log_sys->log; + log_group_t* group = &log_sys.log; group->n_files = n_files; group->format = srv_encrypt_log @@ -885,7 +879,7 @@ log_group_file_header_flush( " file " ULINTPF " header", start_lsn, nth_file)); - log_sys->n_log_ios++; + log_sys.n_log_ios++; MONITOR_INC(MONITOR_LOG_IO); @@ -1005,7 +999,7 @@ loop: log_block_store_checksum(buf + i * OS_FILE_LOG_BLOCK_SIZE); } - log_sys->n_log_ios++; + log_sys.n_log_ios++; MONITOR_INC(MONITOR_LOG_IO); @@ -1043,9 +1037,9 @@ static void log_write_flush_to_disk_low() { - /* FIXME: This is not holding log_sys->mutex while + /* FIXME: This is not holding log_sys.mutex while calling os_event_set()! */ - ut_a(log_sys->n_pending_flushes == 1); /* No other threads here */ + ut_a(log_sys.n_pending_flushes == 1); /* No other threads here */ bool do_flush = srv_file_flush_method != SRV_O_DSYNC; @@ -1057,12 +1051,12 @@ log_write_flush_to_disk_low() log_mutex_enter(); if (do_flush) { - log_sys->flushed_to_disk_lsn = log_sys->current_flush_lsn; + log_sys.flushed_to_disk_lsn = log_sys.current_flush_lsn; } - log_sys->n_pending_flushes--; + log_sys.n_pending_flushes--; - os_event_set(log_sys->flush_event); + os_event_set(log_sys.flush_event); } /** Switch the log buffer in use, and copy the content of last block @@ -1075,29 +1069,29 @@ log_buffer_switch() ut_ad(log_mutex_own()); ut_ad(log_write_mutex_own()); - const byte* old_buf = log_sys->buf; - ulint area_end = ut_calc_align(log_sys->buf_free, + const byte* old_buf = log_sys.buf; + ulint area_end = ut_calc_align(log_sys.buf_free, OS_FILE_LOG_BLOCK_SIZE); - if (log_sys->first_in_use) { - log_sys->first_in_use = false; - ut_ad(log_sys->buf == ut_align(log_sys->buf, + if (log_sys.first_in_use) { + log_sys.first_in_use = false; + ut_ad(log_sys.buf == ut_align(log_sys.buf, OS_FILE_LOG_BLOCK_SIZE)); - log_sys->buf += srv_log_buffer_size; + log_sys.buf += srv_log_buffer_size; } else { - log_sys->first_in_use = true; - log_sys->buf -= srv_log_buffer_size; - ut_ad(log_sys->buf == ut_align(log_sys->buf, + log_sys.first_in_use = true; + log_sys.buf -= srv_log_buffer_size; + ut_ad(log_sys.buf == ut_align(log_sys.buf, OS_FILE_LOG_BLOCK_SIZE)); } /* Copy the last block to new buf */ - ut_memcpy(log_sys->buf, + ut_memcpy(log_sys.buf, old_buf + area_end - OS_FILE_LOG_BLOCK_SIZE, OS_FILE_LOG_BLOCK_SIZE); - log_sys->buf_free %= OS_FILE_LOG_BLOCK_SIZE; - log_sys->buf_next_to_write = log_sys->buf_free; + log_sys.buf_free %= OS_FILE_LOG_BLOCK_SIZE; + log_sys.buf_next_to_write = log_sys.buf_free; } /** Ensure that the log has been written to the log file up to a given @@ -1142,7 +1136,7 @@ loop: (flush_to_disk == true) case, because the log_mutex contention also works as the arbitrator for write-IO (fsync) bandwidth between log files and data files. 
*/ - if (!flush_to_disk && log_sys->write_lsn >= lsn) { + if (!flush_to_disk && log_sys.write_lsn >= lsn) { return; } #endif @@ -1151,8 +1145,8 @@ loop: ut_ad(!recv_no_log_write); lsn_t limit_lsn = flush_to_disk - ? log_sys->flushed_to_disk_lsn - : log_sys->write_lsn; + ? log_sys.flushed_to_disk_lsn + : log_sys.write_lsn; if (limit_lsn >= lsn) { log_write_mutex_exit(); @@ -1165,15 +1159,15 @@ loop: pending flush and based on that we wait for it to finish before proceeding further. */ if (flush_to_disk - && (log_sys->n_pending_flushes > 0 - || !os_event_is_set(log_sys->flush_event))) { + && (log_sys.n_pending_flushes > 0 + || !os_event_is_set(log_sys.flush_event))) { /* Figure out if the current flush will do the job for us. */ - bool work_done = log_sys->current_flush_lsn >= lsn; + bool work_done = log_sys.current_flush_lsn >= lsn; log_write_mutex_exit(); - os_event_wait(log_sys->flush_event); + os_event_wait(log_sys.flush_event); if (work_done) { return; @@ -1184,7 +1178,7 @@ loop: log_mutex_enter(); if (!flush_to_disk - && log_sys->buf_free == log_sys->buf_next_to_write) { + && log_sys.buf_free == log_sys.buf_next_to_write) { /* Nothing to write and no flush to disk requested */ log_mutex_exit_all(); return; @@ -1198,15 +1192,15 @@ loop: ulint pad_size; DBUG_PRINT("ib_log", ("write " LSN_PF " to " LSN_PF, - log_sys->write_lsn, - log_sys->lsn)); + log_sys.write_lsn, + log_sys.lsn)); if (flush_to_disk) { - log_sys->n_pending_flushes++; - log_sys->current_flush_lsn = log_sys->lsn; + log_sys.n_pending_flushes++; + log_sys.current_flush_lsn = log_sys.lsn; MONITOR_INC(MONITOR_PENDING_LOG_FLUSH); - os_event_reset(log_sys->flush_event); + os_event_reset(log_sys.flush_event); - if (log_sys->buf_free == log_sys->buf_next_to_write) { + if (log_sys.buf_free == log_sys.buf_next_to_write) { /* Nothing to write, flush only */ log_mutex_exit_all(); log_write_flush_to_disk_low(); @@ -1215,25 +1209,25 @@ loop: } } - start_offset = log_sys->buf_next_to_write; - end_offset = log_sys->buf_free; + start_offset = log_sys.buf_next_to_write; + end_offset = log_sys.buf_free; area_start = ut_calc_align_down(start_offset, OS_FILE_LOG_BLOCK_SIZE); area_end = ut_calc_align(end_offset, OS_FILE_LOG_BLOCK_SIZE); ut_ad(area_end - area_start > 0); - log_block_set_flush_bit(log_sys->buf + area_start, TRUE); + log_block_set_flush_bit(log_sys.buf + area_start, TRUE); log_block_set_checkpoint_no( - log_sys->buf + area_end - OS_FILE_LOG_BLOCK_SIZE, - log_sys->next_checkpoint_no); + log_sys.buf + area_end - OS_FILE_LOG_BLOCK_SIZE, + log_sys.next_checkpoint_no); - write_lsn = log_sys->lsn; - write_buf = log_sys->buf; + write_lsn = log_sys.lsn; + write_buf = log_sys.buf; log_buffer_switch(); - log_group_set_fields(&log_sys->log, log_sys->write_lsn); + log_group_set_fields(&log_sys.log, log_sys.write_lsn); log_mutex_exit(); /* Erase the end of the last log block. 
*/ @@ -1247,7 +1241,7 @@ loop: end_offset = log_group_calc_lsn_offset( ut_uint64_align_up(write_lsn, OS_FILE_LOG_BLOCK_SIZE), - &log_sys->log); + &log_sys.log); end_offset_in_unit = (ulint) (end_offset % write_ahead_size); if (end_offset_in_unit > 0 @@ -1262,37 +1256,37 @@ loop: } } - if (log_sys->is_encrypted()) { - log_crypt(write_buf + area_start, log_sys->write_lsn, + if (log_sys.is_encrypted()) { + log_crypt(write_buf + area_start, log_sys.write_lsn, area_end - area_start); } /* Do the write to the log files */ log_group_write_buf( - &log_sys->log, write_buf + area_start, + &log_sys.log, write_buf + area_start, area_end - area_start + pad_size, #ifdef UNIV_DEBUG pad_size, #endif /* UNIV_DEBUG */ - ut_uint64_align_down(log_sys->write_lsn, + ut_uint64_align_down(log_sys.write_lsn, OS_FILE_LOG_BLOCK_SIZE), start_offset - area_start); srv_stats.log_padded.add(pad_size); - log_sys->write_lsn = write_lsn; + log_sys.write_lsn = write_lsn; if (srv_file_flush_method == SRV_O_DSYNC) { /* O_SYNC means the OS did not buffer the log file at all: so we have also flushed to disk what we have written */ - log_sys->flushed_to_disk_lsn = log_sys->write_lsn; + log_sys.flushed_to_disk_lsn = log_sys.write_lsn; } log_write_mutex_exit(); if (flush_to_disk) { log_write_flush_to_disk_low(); - ib_uint64_t write_lsn = log_sys->write_lsn; - ib_uint64_t flush_lsn = log_sys->flushed_to_disk_lsn; + ib_uint64_t write_lsn = log_sys.write_lsn; + ib_uint64_t flush_lsn = log_sys.flushed_to_disk_lsn; log_mutex_exit(); innobase_mysql_log_notify(write_lsn, flush_lsn); @@ -1324,11 +1318,11 @@ log_buffer_sync_in_background( log_mutex_enter(); - lsn = log_sys->lsn; + lsn = log_sys.lsn; if (flush - && log_sys->n_pending_flushes > 0 - && log_sys->current_flush_lsn >= lsn) { + && log_sys.n_pending_flushes > 0 + && log_sys.current_flush_lsn >= lsn) { /* The write + flush will write enough */ log_mutex_exit(); return; @@ -1348,14 +1342,13 @@ void log_flush_margin(void) /*==================*/ { - log_t* log = log_sys; lsn_t lsn = 0; log_mutex_enter(); - if (log->buf_free > log->max_buf_free) { + if (log_sys.buf_free > log_sys.max_buf_free) { /* We can write during flush */ - lsn = log->lsn; + lsn = log_sys.lsn; } log_mutex_exit(); @@ -1435,20 +1428,20 @@ log_complete_checkpoint(void) /*=========================*/ { ut_ad(log_mutex_own()); - ut_ad(log_sys->n_pending_checkpoint_writes == 0); + ut_ad(log_sys.n_pending_checkpoint_writes == 0); - log_sys->next_checkpoint_no++; + log_sys.next_checkpoint_no++; - log_sys->last_checkpoint_lsn = log_sys->next_checkpoint_lsn; + log_sys.last_checkpoint_lsn = log_sys.next_checkpoint_lsn; MONITOR_SET(MONITOR_LSN_CHECKPOINT_AGE, - log_sys->lsn - log_sys->last_checkpoint_lsn); + log_sys.lsn - log_sys.last_checkpoint_lsn); DBUG_PRINT("ib_log", ("checkpoint ended at " LSN_PF ", flushed to " LSN_PF, - log_sys->last_checkpoint_lsn, - log_sys->flushed_to_disk_lsn)); + log_sys.last_checkpoint_lsn, + log_sys.flushed_to_disk_lsn)); - rw_lock_x_unlock_gen(&(log_sys->checkpoint_lock), LOG_CHECKPOINT); + rw_lock_x_unlock_gen(&(log_sys.checkpoint_lock), LOG_CHECKPOINT); } /******************************************************//** @@ -1462,9 +1455,9 @@ log_io_complete_checkpoint(void) log_mutex_enter(); - ut_ad(log_sys->n_pending_checkpoint_writes > 0); + ut_ad(log_sys.n_pending_checkpoint_writes > 0); - if (--log_sys->n_pending_checkpoint_writes == 0) { + if (--log_sys.n_pending_checkpoint_writes == 0) { log_complete_checkpoint(); } @@ -1482,29 +1475,29 @@ log_group_checkpoint(lsn_t end_lsn) 
ut_ad(!srv_read_only_mode); ut_ad(log_mutex_own()); - ut_ad(end_lsn == 0 || end_lsn >= log_sys->next_checkpoint_lsn); - ut_ad(end_lsn <= log_sys->lsn); - ut_ad(end_lsn + SIZE_OF_MLOG_CHECKPOINT <= log_sys->lsn + ut_ad(end_lsn == 0 || end_lsn >= log_sys.next_checkpoint_lsn); + ut_ad(end_lsn <= log_sys.lsn); + ut_ad(end_lsn + SIZE_OF_MLOG_CHECKPOINT <= log_sys.lsn || srv_shutdown_state != SRV_SHUTDOWN_NONE); DBUG_PRINT("ib_log", ("checkpoint " UINT64PF " at " LSN_PF " written", - log_sys->next_checkpoint_no, - log_sys->next_checkpoint_lsn)); + log_sys.next_checkpoint_no, + log_sys.next_checkpoint_lsn)); - log_group_t* group = &log_sys->log; + log_group_t* group = &log_sys.log; buf = group->checkpoint_buf; memset(buf, 0, OS_FILE_LOG_BLOCK_SIZE); - mach_write_to_8(buf + LOG_CHECKPOINT_NO, log_sys->next_checkpoint_no); - mach_write_to_8(buf + LOG_CHECKPOINT_LSN, log_sys->next_checkpoint_lsn); + mach_write_to_8(buf + LOG_CHECKPOINT_NO, log_sys.next_checkpoint_no); + mach_write_to_8(buf + LOG_CHECKPOINT_LSN, log_sys.next_checkpoint_lsn); - if (log_sys->is_encrypted()) { + if (log_sys.is_encrypted()) { log_crypt_write_checkpoint_buf(buf); } - lsn_offset = log_group_calc_lsn_offset(log_sys->next_checkpoint_lsn, + lsn_offset = log_group_calc_lsn_offset(log_sys.next_checkpoint_lsn, group); mach_write_to_8(buf + LOG_CHECKPOINT_OFFSET, lsn_offset); mach_write_to_8(buf + LOG_CHECKPOINT_LOG_BUF_SIZE, @@ -1515,15 +1508,15 @@ log_group_checkpoint(lsn_t end_lsn) MONITOR_INC(MONITOR_PENDING_CHECKPOINT_WRITE); - log_sys->n_log_ios++; + log_sys.n_log_ios++; MONITOR_INC(MONITOR_LOG_IO); ut_ad(LOG_CHECKPOINT_1 < srv_page_size); ut_ad(LOG_CHECKPOINT_2 < srv_page_size); - if (log_sys->n_pending_checkpoint_writes++ == 0) { - rw_lock_x_lock_gen(&log_sys->checkpoint_lock, + if (log_sys.n_pending_checkpoint_writes++ == 0) { + rw_lock_x_lock_gen(&log_sys.checkpoint_lock, LOG_CHECKPOINT); } @@ -1537,7 +1530,7 @@ log_group_checkpoint(lsn_t end_lsn) fil_io(IORequestLogWrite, false, page_id_t(SRV_LOG_SPACE_FIRST_ID, 0), univ_page_size, - (log_sys->next_checkpoint_no & 1) + (log_sys.next_checkpoint_no & 1) ? LOG_CHECKPOINT_2 : LOG_CHECKPOINT_1, OS_FILE_LOG_BLOCK_SIZE, buf, (byte*) group + 1); @@ -1545,7 +1538,7 @@ log_group_checkpoint(lsn_t end_lsn) ut_ad(((ulint) group & 0x1UL) == 0); } -/** Read a log group header page to log_sys->checkpoint_buf. +/** Read a log group header page to log_sys.checkpoint_buf. @param[in] group log group @param[in] header 0 or LOG_CHEKCPOINT_1 or LOG_CHECKPOINT2 */ void @@ -1555,7 +1548,7 @@ log_group_header_read( { ut_ad(log_mutex_own()); - log_sys->n_log_ios++; + log_sys.n_log_ios++; MONITOR_INC(MONITOR_LOG_IO); @@ -1563,7 +1556,7 @@ log_group_header_read( page_id_t(SRV_LOG_SPACE_FIRST_ID, header >> srv_page_size_shift), univ_page_size, header & (srv_page_size - 1), - OS_FILE_LOG_BLOCK_SIZE, log_sys->checkpoint_buf, NULL); + OS_FILE_LOG_BLOCK_SIZE, log_sys.checkpoint_buf, NULL); } /** Write checkpoint info to the log header and invoke log_mutex_exit(). 
@@ -1583,8 +1576,8 @@ log_write_checkpoint_info(bool sync, lsn_t end_lsn) if (sync) { /* Wait for the checkpoint write to complete */ - rw_lock_s_lock(&log_sys->checkpoint_lock); - rw_lock_s_unlock(&log_sys->checkpoint_lock); + rw_lock_s_lock(&log_sys.checkpoint_lock); + rw_lock_s_unlock(&log_sys.checkpoint_lock); DBUG_EXECUTE_IF( "crash_after_checkpoint", @@ -1600,8 +1593,8 @@ log_append_on_checkpoint( mtr_buf_t* buf) { log_mutex_enter(); - mtr_buf_t* old = log_sys->append_on_checkpoint; - log_sys->append_on_checkpoint = buf; + mtr_buf_t* old = log_sys.append_on_checkpoint; + log_sys.append_on_checkpoint = buf; log_mutex_exit(); return(old); } @@ -1652,24 +1645,24 @@ log_checkpoint( oldest_lsn = log_buf_pool_get_oldest_modification(); /* Because log also contains headers and dummy log records, - log_buf_pool_get_oldest_modification() will return log_sys->lsn + log_buf_pool_get_oldest_modification() will return log_sys.lsn if the buffer pool contains no dirty buffers. We must make sure that the log is flushed up to that lsn. If there are dirty buffers in the buffer pool, then our write-ahead-logging algorithm ensures that the log has been flushed up to oldest_lsn. */ - ut_ad(oldest_lsn >= log_sys->last_checkpoint_lsn); + ut_ad(oldest_lsn >= log_sys.last_checkpoint_lsn); if (!write_always && oldest_lsn - <= log_sys->last_checkpoint_lsn + SIZE_OF_MLOG_CHECKPOINT) { + <= log_sys.last_checkpoint_lsn + SIZE_OF_MLOG_CHECKPOINT) { /* Do nothing, because nothing was logged (other than a MLOG_CHECKPOINT marker) since the previous checkpoint. */ log_mutex_exit(); return(true); } /* Repeat the MLOG_FILE_NAME records after the checkpoint, in - case some log records between the checkpoint and log_sys->lsn + case some log records between the checkpoint and log_sys.lsn need them. Finally, write a MLOG_CHECKPOINT marker. Redo log apply expects to see a MLOG_CHECKPOINT after the checkpoint, except on clean shutdown, where the log will be empty after @@ -1680,14 +1673,14 @@ log_checkpoint( threads will be blocked, and no pages can be added to the flush lists. 
*/ lsn_t flush_lsn = oldest_lsn; - const lsn_t end_lsn = log_sys->lsn; + const lsn_t end_lsn = log_sys.lsn; const bool do_write = srv_shutdown_state == SRV_SHUTDOWN_NONE || flush_lsn != end_lsn; if (fil_names_clear(flush_lsn, do_write)) { - ut_ad(log_sys->lsn >= end_lsn + SIZE_OF_MLOG_CHECKPOINT); - flush_lsn = log_sys->lsn; + ut_ad(log_sys.lsn >= end_lsn + SIZE_OF_MLOG_CHECKPOINT); + flush_lsn = log_sys.lsn; } log_mutex_exit(); @@ -1710,28 +1703,28 @@ log_checkpoint( log_mutex_enter(); - ut_ad(log_sys->flushed_to_disk_lsn >= flush_lsn); + ut_ad(log_sys.flushed_to_disk_lsn >= flush_lsn); ut_ad(flush_lsn >= oldest_lsn); - if (log_sys->last_checkpoint_lsn >= oldest_lsn) { + if (log_sys.last_checkpoint_lsn >= oldest_lsn) { log_mutex_exit(); return(true); } - if (log_sys->n_pending_checkpoint_writes > 0) { + if (log_sys.n_pending_checkpoint_writes > 0) { /* A checkpoint write is running */ log_mutex_exit(); if (sync) { /* Wait for the checkpoint write to complete */ - rw_lock_s_lock(&log_sys->checkpoint_lock); - rw_lock_s_unlock(&log_sys->checkpoint_lock); + rw_lock_s_lock(&log_sys.checkpoint_lock); + rw_lock_s_unlock(&log_sys.checkpoint_lock); } return(false); } - log_sys->next_checkpoint_lsn = oldest_lsn; + log_sys.next_checkpoint_lsn = oldest_lsn; log_write_checkpoint_info(sync, end_lsn); ut_ad(!log_mutex_own()); @@ -1769,7 +1762,6 @@ void log_checkpoint_margin(void) /*=======================*/ { - log_t* log = log_sys; lsn_t age; lsn_t checkpoint_age; ib_uint64_t advance; @@ -1781,39 +1773,39 @@ loop: log_mutex_enter(); ut_ad(!recv_no_log_write); - if (!log->check_flush_or_checkpoint) { + if (!log_sys.check_flush_or_checkpoint) { log_mutex_exit(); return; } oldest_lsn = log_buf_pool_get_oldest_modification(); - age = log->lsn - oldest_lsn; + age = log_sys.lsn - oldest_lsn; - if (age > log->max_modified_age_sync) { + if (age > log_sys.max_modified_age_sync) { /* A flush is urgent: we have to do a synchronous preflush */ - advance = age - log->max_modified_age_sync; + advance = age - log_sys.max_modified_age_sync; } - checkpoint_age = log->lsn - log->last_checkpoint_lsn; + checkpoint_age = log_sys.lsn - log_sys.last_checkpoint_lsn; bool checkpoint_sync; bool do_checkpoint; - if (checkpoint_age > log->max_checkpoint_age) { + if (checkpoint_age > log_sys.max_checkpoint_age) { /* A checkpoint is urgent: we do it synchronously */ checkpoint_sync = true; do_checkpoint = true; - } else if (checkpoint_age > log->max_checkpoint_age_async) { + } else if (checkpoint_age > log_sys.max_checkpoint_age_async) { /* A checkpoint is not urgent: do it asynchronously */ do_checkpoint = true; checkpoint_sync = false; - log->check_flush_or_checkpoint = false; + log_sys.check_flush_or_checkpoint = false; } else { do_checkpoint = false; checkpoint_sync = false; - log->check_flush_or_checkpoint = false; + log_sys.check_flush_or_checkpoint = false; } log_mutex_exit(); @@ -1828,9 +1820,7 @@ loop: thread doing a flush at the same time. 
*/ if (!success) { log_mutex_enter(); - - log->check_flush_or_checkpoint = true; - + log_sys.check_flush_or_checkpoint = true; log_mutex_exit(); goto loop; } @@ -1861,7 +1851,7 @@ log_check_margins(void) log_checkpoint_margin(); log_mutex_enter(); ut_ad(!recv_no_log_write); - check = log_sys->check_flush_or_checkpoint; + check = log_sys.check_flush_or_checkpoint; log_mutex_exit(); } while (check); } @@ -1886,7 +1876,7 @@ logs_empty_and_mark_files_at_shutdown(void) srv_shutdown_state = SRV_SHUTDOWN_CLEANUP; loop: ut_ad(lock_sys.is_initialised() || !srv_was_started); - ut_ad(log_sys || !srv_was_started); + ut_ad(log_sys.is_initialised() || !srv_was_started); ut_ad(fil_system.is_initialised() || !srv_was_started); os_event_set(srv_buf_resize_event); @@ -2023,10 +2013,10 @@ wait_suspend_loop: os_event_set(log_scrub_event); } - if (log_sys) { + if (log_sys.is_initialised()) { log_mutex_enter(); - const ulint n_write = log_sys->n_pending_checkpoint_writes; - const ulint n_flush = log_sys->n_pending_flushes; + const ulint n_write = log_sys.n_pending_checkpoint_writes; + const ulint n_flush = log_sys.n_pending_flushes; log_mutex_exit(); if (log_scrub_thread_active || n_write || n_flush) { @@ -2090,10 +2080,10 @@ wait_suspend_loop: log_mutex_enter(); - lsn = log_sys->lsn; + lsn = log_sys.lsn; - const bool lsn_changed = lsn != log_sys->last_checkpoint_lsn; - ut_ad(lsn >= log_sys->last_checkpoint_lsn); + const bool lsn_changed = lsn != log_sys.last_checkpoint_lsn; + ut_ad(lsn >= log_sys.last_checkpoint_lsn); log_mutex_exit(); @@ -2117,7 +2107,7 @@ wait_suspend_loop: "Free innodb buffer pool"); buf_all_freed(); - ut_a(lsn == log_sys->lsn + ut_a(lsn == log_sys.lsn || srv_force_recovery == SRV_FORCE_NO_LOG_REDO); if (lsn < srv_start_lsn) { @@ -2141,7 +2131,7 @@ wait_suspend_loop: /* Make some checks that the server really is quiet */ ut_a(srv_get_active_thread_type() == SRV_NONE); - ut_a(lsn == log_sys->lsn + ut_a(lsn == log_sys.lsn || srv_force_recovery == SRV_FORCE_NO_LOG_REDO); } @@ -2153,8 +2143,8 @@ log_peek_lsn( /*=========*/ lsn_t* lsn) /*!< out: if returns TRUE, current lsn is here */ { - if (0 == mutex_enter_nowait(&(log_sys->mutex))) { - *lsn = log_sys->lsn; + if (0 == mutex_enter_nowait(&(log_sys.mutex))) { + *lsn = log_sys.lsn; log_mutex_exit(); @@ -2181,15 +2171,15 @@ log_print( "Log flushed up to " LSN_PF "\n" "Pages flushed up to " LSN_PF "\n" "Last checkpoint at " LSN_PF "\n", - log_sys->lsn, - log_sys->flushed_to_disk_lsn, + log_sys.lsn, + log_sys.flushed_to_disk_lsn, log_buf_pool_get_oldest_modification(), - log_sys->last_checkpoint_lsn); + log_sys.last_checkpoint_lsn); current_time = time(NULL); time_elapsed = difftime(current_time, - log_sys->last_printout_time); + log_sys.last_printout_time); if (time_elapsed <= 0) { time_elapsed = 1; @@ -2199,15 +2189,15 @@ log_print( ULINTPF " pending log flushes, " ULINTPF " pending chkp writes\n" ULINTPF " log i/o's done, %.2f log i/o's/second\n", - log_sys->n_pending_flushes, - log_sys->n_pending_checkpoint_writes, - log_sys->n_log_ios, + log_sys.n_pending_flushes, + log_sys.n_pending_checkpoint_writes, + log_sys.n_log_ios, static_cast( - log_sys->n_log_ios - log_sys->n_log_ios_old) + log_sys.n_log_ios - log_sys.n_log_ios_old) / time_elapsed); - log_sys->n_log_ios_old = log_sys->n_log_ios; - log_sys->last_printout_time = current_time; + log_sys.n_log_ios_old = log_sys.n_log_ios; + log_sys.last_printout_time = current_time; log_mutex_exit(); } @@ -2218,8 +2208,8 @@ void log_refresh_stats(void) /*===================*/ { - log_sys->n_log_ios_old = 
log_sys->n_log_ios; - log_sys->last_printout_time = time(NULL); + log_sys.n_log_ios_old = log_sys.n_log_ios; + log_sys.last_printout_time = time(NULL); } /** Close a log group. @@ -2249,39 +2239,38 @@ void log_group_close_all(void) /*=====================*/ { - log_group_close(&log_sys->log); + log_group_close(&log_sys.log); } /** Shut down the redo log subsystem. */ -void -log_shutdown() +void log_t::close() { - log_group_close_all(); + ut_ad(this == &log_sys); + if (!is_initialised()) return; + m_initialised = false; + log_group_close_all(); - if (!log_sys->first_in_use) { - log_sys->buf -= srv_log_buffer_size; - } - ut_free_dodump(log_sys->buf, srv_log_buffer_size * 2); - log_sys->buf = NULL; - ut_free(log_sys->checkpoint_buf_ptr); - log_sys->checkpoint_buf_ptr = NULL; - log_sys->checkpoint_buf = NULL; + if (!first_in_use) + buf -= srv_log_buffer_size; + ut_free_dodump(buf, srv_log_buffer_size * 2); + buf = NULL; - os_event_destroy(log_sys->flush_event); + os_event_destroy(flush_event); - rw_lock_free(&log_sys->checkpoint_lock); + rw_lock_free(&checkpoint_lock); + /* rw_lock_free() already called checkpoint_lock.~rw_lock_t(); + tame the debug assertions when the destructor will be called once more. */ + ut_ad(checkpoint_lock.magic_n == 0); + ut_d(checkpoint_lock.magic_n = RW_LOCK_MAGIC_N); - mutex_free(&log_sys->mutex); - mutex_free(&log_sys->write_mutex); - mutex_free(&log_sys->log_flush_order_mutex); + mutex_free(&mutex); + mutex_free(&write_mutex); + mutex_free(&log_flush_order_mutex); - if (!srv_read_only_mode && srv_scrub_log) { - os_event_destroy(log_scrub_event); - } + if (!srv_read_only_mode && srv_scrub_log) + os_event_destroy(log_scrub_event); - recv_sys_close(); - ut_free(log_sys); - log_sys = NULL; + recv_sys_close(); } /******************************************************//** @@ -2302,7 +2291,7 @@ log_pad_current_log_block(void) lsn = log_reserve_and_open(OS_FILE_LOG_BLOCK_SIZE); pad_length = OS_FILE_LOG_BLOCK_SIZE - - (log_sys->buf_free % OS_FILE_LOG_BLOCK_SIZE) + - (log_sys.buf_free % OS_FILE_LOG_BLOCK_SIZE) - LOG_BLOCK_TRL_SIZE; if (pad_length == (OS_FILE_LOG_BLOCK_SIZE - LOG_BLOCK_HDR_SIZE @@ -2319,7 +2308,7 @@ log_pad_current_log_block(void) log_write_low(&b, 1); } - lsn = log_sys->lsn; + lsn = log_sys.lsn; log_close(); @@ -2335,14 +2324,14 @@ log_scrub() /*=========*/ { log_mutex_enter(); - ulint cur_lbn = log_block_convert_lsn_to_no(log_sys->lsn); + ulint cur_lbn = log_block_convert_lsn_to_no(log_sys.lsn); if (next_lbn_to_pad == cur_lbn) { log_pad_current_log_block(); } - next_lbn_to_pad = log_block_convert_lsn_to_no(log_sys->lsn); + next_lbn_to_pad = log_block_convert_lsn_to_no(log_sys.lsn); log_mutex_exit(); } diff --git a/storage/innobase/log/log0recv.cc b/storage/innobase/log/log0recv.cc index 08e88389eaf..223733ccc27 100644 --- a/storage/innobase/log/log0recv.cc +++ b/storage/innobase/log/log0recv.cc @@ -79,7 +79,7 @@ volatile bool recv_recovery_on; bool recv_needed_recovery; #ifdef UNIV_DEBUG /** TRUE if writing to the redo log (mtr_commit) is forbidden. -Protected by log_sys->mutex. */ +Protected by log_sys.mutex. 
*/ bool recv_no_log_write = false; #endif /* UNIV_DEBUG */ @@ -669,7 +669,7 @@ loop: (source_offset % group->file_size)); } - log_sys->n_log_ios++; + log_sys.n_log_ios++; MONITOR_INC(MONITOR_LOG_IO); @@ -760,13 +760,13 @@ recv_synchronize_groups() lsn_t start_lsn = ut_uint64_align_down(recovered_lsn, OS_FILE_LOG_BLOCK_SIZE); - log_group_read_log_seg(log_sys->buf, &log_sys->log, + log_group_read_log_seg(log_sys.buf, &log_sys.log, &start_lsn, start_lsn + OS_FILE_LOG_BLOCK_SIZE); /* Update the fields in the group struct to correspond to recovered_lsn */ - log_group_set_fields(&log_sys->log, recovered_lsn); + log_group_set_fields(&log_sys.log, recovered_lsn); /* Copy the checkpoint info to the log; remember that we have incremented checkpoint_no by one, and the info will not be written @@ -799,10 +799,10 @@ static MY_ATTRIBUTE((warn_unused_result)) dberr_t recv_find_max_checkpoint_0(log_group_t** max_group, ulint* max_field) { - log_group_t* group = &log_sys->log; + log_group_t* group = &log_sys.log; ib_uint64_t max_no = 0; ib_uint64_t checkpoint_no; - byte* buf = log_sys->checkpoint_buf; + byte* buf = log_sys.checkpoint_buf; ut_ad(group->format == 0); @@ -882,12 +882,12 @@ dberr_t recv_log_format_0_recover(lsn_t lsn) { log_mutex_enter(); - log_group_t* group = &log_sys->log; + log_group_t* group = &log_sys.log; const lsn_t source_offset = log_group_calc_lsn_offset(lsn, group); log_mutex_exit(); const ulint page_no = ulint(source_offset >> srv_page_size_shift); - byte* buf = log_sys->buf; + byte* buf = log_sys.buf; static const char* NO_UPGRADE_RECOVERY_MSG = "Upgrade after a crash is not supported." @@ -919,11 +919,11 @@ recv_log_format_0_recover(lsn_t lsn) recv_sys->parse_start_lsn = recv_sys->recovered_lsn = recv_sys->scanned_lsn = recv_sys->mlog_checkpoint_lsn = lsn; - log_sys->last_checkpoint_lsn = log_sys->next_checkpoint_lsn - = log_sys->lsn = log_sys->write_lsn - = log_sys->current_flush_lsn = log_sys->flushed_to_disk_lsn + log_sys.last_checkpoint_lsn = log_sys.next_checkpoint_lsn + = log_sys.lsn = log_sys.write_lsn + = log_sys.current_flush_lsn = log_sys.flushed_to_disk_lsn = lsn; - log_sys->next_checkpoint_no = 0; + log_sys.next_checkpoint_no = 0; return(DB_SUCCESS); } @@ -939,12 +939,12 @@ recv_find_max_checkpoint(ulint* max_field) ulint field; byte* buf; - group = &log_sys->log; + group = &log_sys.log; max_no = 0; *max_field = 0; - buf = log_sys->checkpoint_buf; + buf = log_sys.checkpoint_buf; group->state = LOG_GROUP_CORRUPTED; @@ -1019,7 +1019,7 @@ recv_find_max_checkpoint(ulint* max_field) buf + LOG_CHECKPOINT_LSN); group->lsn_offset = mach_read_from_8( buf + LOG_CHECKPOINT_OFFSET); - log_sys->next_checkpoint_no = checkpoint_no; + log_sys.next_checkpoint_no = checkpoint_no; } } @@ -1751,7 +1751,7 @@ recv_recover_page(bool just_read_in, buf_block_t* block) while (recv) { end_lsn = recv->end_lsn; - ut_ad(end_lsn <= log_sys->log.scanned_lsn); + ut_ad(end_lsn <= log_sys.log.scanned_lsn); if (recv->len > RECV_DATA_BLOCK_SIZE) { /* We have to copy the record body to a separate @@ -2927,11 +2927,11 @@ recv_group_scan_log_recs( OS_FILE_LOG_BLOCK_SIZE); end_lsn = start_lsn; log_group_read_log_seg( - log_sys->buf, group, &end_lsn, + log_sys.buf, group, &end_lsn, start_lsn + RECV_SCAN_SIZE); } while (end_lsn != start_lsn && !recv_scan_log_recs( - available_mem, &store_to_hash, log_sys->buf, + available_mem, &store_to_hash, log_sys.buf, checkpoint_lsn, start_lsn, end_lsn, contiguous_lsn, &group->scanned_lsn)); @@ -3157,14 +3157,14 @@ recv_recovery_from_checkpoint_start(lsn_t flush_lsn) if 
(err != DB_SUCCESS) { - srv_start_lsn = recv_sys->recovered_lsn = log_sys->lsn; + srv_start_lsn = recv_sys->recovered_lsn = log_sys.lsn; log_mutex_exit(); return(err); } - log_group_header_read(&log_sys->log, max_cp_field); + log_group_header_read(&log_sys.log, max_cp_field); - buf = log_sys->checkpoint_buf; + buf = log_sys.checkpoint_buf; checkpoint_lsn = mach_read_from_8(buf + LOG_CHECKPOINT_LSN); checkpoint_no = mach_read_from_8(buf + LOG_CHECKPOINT_NO); @@ -3177,7 +3177,7 @@ recv_recovery_from_checkpoint_start(lsn_t flush_lsn) ut_ad(RECV_SCAN_SIZE <= srv_log_buffer_size); - group = &log_sys->log; + group = &log_sys.log; const lsn_t end_lsn = mach_read_from_8( buf + LOG_CHECKPOINT_END_LSN); @@ -3283,7 +3283,7 @@ recv_recovery_from_checkpoint_start(lsn_t flush_lsn) } } - log_sys->lsn = recv_sys->recovered_lsn; + log_sys.lsn = recv_sys->recovered_lsn; if (recv_needed_recovery) { bool missing_tablespace = false; @@ -3378,8 +3378,8 @@ recv_recovery_from_checkpoint_start(lsn_t flush_lsn) /* Synchronize the uncorrupted log groups to the most up-to-date log group; we also copy checkpoint info to groups */ - log_sys->next_checkpoint_lsn = checkpoint_lsn; - log_sys->next_checkpoint_no = checkpoint_no + 1; + log_sys.next_checkpoint_lsn = checkpoint_lsn; + log_sys.next_checkpoint_no = checkpoint_no + 1; recv_synchronize_groups(); @@ -3389,24 +3389,24 @@ recv_recovery_from_checkpoint_start(lsn_t flush_lsn) srv_start_lsn = recv_sys->recovered_lsn; } - log_sys->buf_free = ulong(log_sys->lsn % OS_FILE_LOG_BLOCK_SIZE); - log_sys->buf_next_to_write = log_sys->buf_free; - log_sys->write_lsn = log_sys->lsn; + log_sys.buf_free = ulong(log_sys.lsn % OS_FILE_LOG_BLOCK_SIZE); + log_sys.buf_next_to_write = log_sys.buf_free; + log_sys.write_lsn = log_sys.lsn; - log_sys->last_checkpoint_lsn = checkpoint_lsn; + log_sys.last_checkpoint_lsn = checkpoint_lsn; if (!srv_read_only_mode && srv_operation == SRV_OPERATION_NORMAL) { /* Write a MLOG_CHECKPOINT marker as the first thing, before generating any other redo log. This ensures that subsequent crash recovery will be possible even if the server were killed soon after this. 
*/ - fil_names_clear(log_sys->last_checkpoint_lsn, true); + fil_names_clear(log_sys.last_checkpoint_lsn, true); } MONITOR_SET(MONITOR_LSN_CHECKPOINT_AGE, - log_sys->lsn - log_sys->last_checkpoint_lsn); + log_sys.lsn - log_sys.last_checkpoint_lsn); - log_sys->next_checkpoint_no = ++checkpoint_no; + log_sys.next_checkpoint_no = ++checkpoint_no; mutex_enter(&recv_sys->mutex); @@ -3512,26 +3512,26 @@ recv_reset_logs( { ut_ad(log_mutex_own()); - log_sys->lsn = ut_uint64_align_up(lsn, OS_FILE_LOG_BLOCK_SIZE); + log_sys.lsn = ut_uint64_align_up(lsn, OS_FILE_LOG_BLOCK_SIZE); - log_sys->log.lsn = log_sys->lsn; - log_sys->log.lsn_offset = LOG_FILE_HDR_SIZE; + log_sys.log.lsn = log_sys.lsn; + log_sys.log.lsn_offset = LOG_FILE_HDR_SIZE; - log_sys->buf_next_to_write = 0; - log_sys->write_lsn = log_sys->lsn; + log_sys.buf_next_to_write = 0; + log_sys.write_lsn = log_sys.lsn; - log_sys->next_checkpoint_no = 0; - log_sys->last_checkpoint_lsn = 0; + log_sys.next_checkpoint_no = 0; + log_sys.last_checkpoint_lsn = 0; - memset(log_sys->buf, 0, srv_log_buffer_size); - log_block_init(log_sys->buf, log_sys->lsn); - log_block_set_first_rec_group(log_sys->buf, LOG_BLOCK_HDR_SIZE); + memset(log_sys.buf, 0, srv_log_buffer_size); + log_block_init(log_sys.buf, log_sys.lsn); + log_block_set_first_rec_group(log_sys.buf, LOG_BLOCK_HDR_SIZE); - log_sys->buf_free = LOG_BLOCK_HDR_SIZE; - log_sys->lsn += LOG_BLOCK_HDR_SIZE; + log_sys.buf_free = LOG_BLOCK_HDR_SIZE; + log_sys.lsn += LOG_BLOCK_HDR_SIZE; MONITOR_SET(MONITOR_LSN_CHECKPOINT_AGE, - (log_sys->lsn - log_sys->last_checkpoint_lsn)); + (log_sys.lsn - log_sys.last_checkpoint_lsn)); log_mutex_exit(); diff --git a/storage/innobase/mtr/mtr0mtr.cc b/storage/innobase/mtr/mtr0mtr.cc index a76886d1d57..2c0ecfeb7d3 100644 --- a/storage/innobase/mtr/mtr0mtr.cc +++ b/storage/innobase/mtr/mtr0mtr.cc @@ -481,7 +481,7 @@ mtr_write_log( ut_ad(!recv_no_log_write); DBUG_PRINT("ib_log", (ULINTPF " extra bytes written at " LSN_PF, - len, log_sys->lsn)); + len, log_sys.lsn)); log_reserve_and_open(len); log->for_each_block(write_log); @@ -624,7 +624,7 @@ mtr_t::commit_checkpoint( if (write_mlog_checkpoint) { DBUG_PRINT("ib_log", ("MLOG_CHECKPOINT(" LSN_PF ") written at " LSN_PF, - checkpoint_lsn, log_sys->lsn)); + checkpoint_lsn, log_sys.lsn)); } } @@ -774,7 +774,7 @@ mtr_t::Command::prepare_write() case MTR_LOG_NONE: ut_ad(m_impl->m_log.size() == 0); log_mutex_enter(); - m_end_lsn = m_start_lsn = log_sys->lsn; + m_end_lsn = m_start_lsn = log_sys.lsn; return(0); case MTR_LOG_ALL: break; diff --git a/storage/innobase/row/row0merge.cc b/storage/innobase/row/row0merge.cc index 82a424e2ea5..5db0a160793 100644 --- a/storage/innobase/row/row0merge.cc +++ b/storage/innobase/row/row0merge.cc @@ -144,7 +144,7 @@ public: ut_ad(dict_index_is_spatial(m_index)); DBUG_EXECUTE_IF("row_merge_instrument_log_check_flush", - log_sys->check_flush_or_checkpoint = true; + log_sys.check_flush_or_checkpoint = true; ); for (idx_tuple_vec::iterator it = m_dtuple_vec->begin(); @@ -153,7 +153,7 @@ public: dtuple = *it; ut_ad(dtuple); - if (log_sys->check_flush_or_checkpoint) { + if (log_sys.check_flush_or_checkpoint) { if (!(*mtr_committed)) { btr_pcur_move_to_prev_on_page(pcur); btr_pcur_store_position(pcur, scan_mtr); diff --git a/storage/innobase/srv/srv0mon.cc b/storage/innobase/srv/srv0mon.cc index 53a5ac0f53f..84e88c96742 100644 --- a/storage/innobase/srv/srv0mon.cc +++ b/storage/innobase/srv/srv0mon.cc @@ -2000,11 +2000,11 @@ srv_mon_process_existing_counter( break; case MONITOR_OVLD_LSN_FLUSHDISK: - value = 
(mon_type_t) log_sys->flushed_to_disk_lsn; + value = (mon_type_t) log_sys.flushed_to_disk_lsn; break; case MONITOR_OVLD_LSN_CURRENT: - value = (mon_type_t) log_sys->lsn; + value = (mon_type_t) log_sys.lsn; break; case MONITOR_OVLD_BUF_OLDEST_LSN: @@ -2012,15 +2012,15 @@ srv_mon_process_existing_counter( break; case MONITOR_OVLD_LSN_CHECKPOINT: - value = (mon_type_t) log_sys->last_checkpoint_lsn; + value = (mon_type_t) log_sys.last_checkpoint_lsn; break; case MONITOR_OVLD_MAX_AGE_ASYNC: - value = log_sys->max_modified_age_async; + value = log_sys.max_modified_age_async; break; case MONITOR_OVLD_MAX_AGE_SYNC: - value = log_sys->max_modified_age_sync; + value = log_sys.max_modified_age_sync; break; #ifdef BTR_CUR_HASH_ADAPT diff --git a/storage/innobase/srv/srv0start.cc b/storage/innobase/srv/srv0start.cc index dcbf1bd0878..2dbd5930965 100644 --- a/storage/innobase/srv/srv0start.cc +++ b/storage/innobase/srv/srv0start.cc @@ -496,7 +496,7 @@ create_log_files( /* Create a log checkpoint. */ log_mutex_enter(); - if (log_sys->is_encrypted() && !log_crypt_init()) { + if (log_sys.is_encrypted() && !log_crypt_init()) { return(DB_ERROR); } ut_d(recv_no_log_write = false); @@ -1370,14 +1370,14 @@ srv_prepare_to_delete_redo_log_files( log_mutex_enter(); - fil_names_clear(log_sys->lsn, false); + fil_names_clear(log_sys.lsn, false); - flushed_lsn = log_sys->lsn; + flushed_lsn = log_sys.lsn; { ib::info info; if (srv_log_file_size == 0 - || (log_sys->log.format + || (log_sys.log.format & ~LOG_HEADER_FORMAT_ENCRYPTED) != LOG_HEADER_FORMAT_CURRENT) { info << "Upgrading redo log: "; @@ -1385,7 +1385,7 @@ srv_prepare_to_delete_redo_log_files( || srv_log_file_size != srv_log_file_size_requested) { if (srv_encrypt_log - == (my_bool)log_sys->is_encrypted()) { + == (my_bool)log_sys.is_encrypted()) { info << (srv_encrypt_log ? "Resizing encrypted" : "Resizing"); @@ -1689,7 +1689,7 @@ dberr_t srv_start(bool create_new_db) } #endif /* UNIV_DEBUG */ - log_sys_init(); + log_sys.create(); recv_sys_init(); lock_sys.create(srv_lock_table_size); @@ -2204,7 +2204,7 @@ files_checked: /* Leave the redo log alone. */ } else if (srv_log_file_size_requested == srv_log_file_size && srv_n_log_files_found == srv_n_log_files - && log_sys->log.format + && log_sys.log.format == (srv_encrypt_log ? LOG_HEADER_FORMAT_CURRENT | LOG_HEADER_FORMAT_ENCRYPTED @@ -2674,11 +2674,11 @@ void innodb_shutdown() ut_ad(buf_dblwr || !srv_was_started || srv_read_only_mode || srv_force_recovery >= SRV_FORCE_NO_TRX_UNDO); ut_ad(lock_sys.is_initialised() || !srv_was_started); + ut_ad(log_sys.is_initialised() || !srv_was_started); #ifdef BTR_CUR_HASH_ADAPT ut_ad(btr_search_sys || !srv_was_started); #endif /* BTR_CUR_HASH_ADAPT */ ut_ad(ibuf || !srv_was_started); - ut_ad(log_sys || !srv_was_started); if (dict_stats_event) { dict_stats_thread_deinit(); @@ -2705,9 +2705,7 @@ void innodb_shutdown() if (ibuf) { ibuf_close(); } - if (log_sys) { - log_shutdown(); - } + log_sys.close(); purge_sys.close(); trx_sys.close(); if (buf_dblwr) {