1
0
mirror of https://github.com/MariaDB/server.git synced 2025-08-08 11:22:35 +03:00

Merge remote-tracking branch 'mergetrees/merge-myrocks' into 10.2-mariarocks

This commit is contained in:
Sergei Petrunia
2017-01-01 23:33:18 +00:00
137 changed files with 6206 additions and 3322 deletions

View File

@@ -7,3 +7,5 @@
--loose-enable-rocksdb_index_file_map --loose-enable-rocksdb_index_file_map
--loose-enable-rocksdb_dbstats --loose-enable-rocksdb_dbstats
--loose-enable-rocksdb_cfstats --loose-enable-rocksdb_cfstats
--loose-enable-rocksdb_lock_info
--loose-enable-rocksdb_trx

View File

@@ -8734,7 +8734,6 @@ double Field_enum::val_real(void)
longlong Field_enum::val_int(void) longlong Field_enum::val_int(void)
{ {
ASSERT_COLUMN_MARKED_FOR_READ;
return read_lowendian(ptr, packlength); return read_lowendian(ptr, packlength);
} }

View File

@@ -5403,7 +5403,7 @@ int handler::compare_key(key_range *range)
This is used by index condition pushdown implementation. This is used by index condition pushdown implementation.
*/ */
int handler::compare_key2(key_range *range) int handler::compare_key2(key_range *range) const
{ {
int cmp; int cmp;
if (!range) if (!range)

View File

@@ -3168,7 +3168,7 @@ public:
virtual int read_range_next(); virtual int read_range_next();
void set_end_range(const key_range *end_key); void set_end_range(const key_range *end_key);
int compare_key(key_range *range); int compare_key(key_range *range);
int compare_key2(key_range *range); int compare_key2(key_range *range) const;
virtual int ft_init() { return HA_ERR_WRONG_COMMAND; } virtual int ft_init() { return HA_ERR_WRONG_COMMAND; }
void ft_end() { ft_handler=NULL; } void ft_end() { ft_handler=NULL; }
virtual FT_INFO *ft_init_ext(uint flags, uint inx,String *key) virtual FT_INFO *ft_init_ext(uint flags, uint inx,String *key)

View File

@@ -6,6 +6,11 @@ ENDIF()
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11") SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11")
CHECK_FUNCTION_EXISTS(sched_getcpu HAVE_SCHED_GETCPU)
IF(HAVE_SCHED_GETCPU)
ADD_DEFINITIONS(-DHAVE_SCHED_GETCPU=1)
ENDIF()
# get a list of rocksdb library source files # get a list of rocksdb library source files
# run with env -i to avoid passing variables # run with env -i to avoid passing variables
EXECUTE_PROCESS( EXECUTE_PROCESS(
@@ -78,32 +83,26 @@ ENDIF()
SET(rocksdb_static_libs ) SET(rocksdb_static_libs )
IF (NOT "$ENV{WITH_SNAPPY}" STREQUAL "") IF (NOT "$ENV{WITH_SNAPPY}" STREQUAL "")
SET(rocksdb_static_libs ${rocksdb_static_libs} SET(rocksdb_static_libs ${rocksdb_static_libs}
$ENV{WITH_SNAPPY}/lib/libsnappy${PIC_EXT}.a) $ENV{WITH_SNAPPY}/libsnappy${PIC_EXT}.a)
ADD_DEFINITIONS(-DSNAPPY) ADD_DEFINITIONS(-DSNAPPY)
ELSE()
SET(rocksdb_static_libs ${rocksdb_static_libs} snappy)
ENDIF() ENDIF()
IF (NOT "$ENV{WITH_LZ4}" STREQUAL "") IF (NOT "$ENV{WITH_LZ4}" STREQUAL "")
SET(rocksdb_static_libs ${rocksdb_static_libs} SET(rocksdb_static_libs ${rocksdb_static_libs}
$ENV{WITH_LZ4}/lib/liblz4${PIC_EXT}.a) $ENV{WITH_LZ4}/liblz4${PIC_EXT}.a)
ADD_DEFINITIONS(-DLZ4) ADD_DEFINITIONS(-DLZ4)
ELSE()
SET(rocksdb_static_libs ${rocksdb_static_libs} lz4)
ENDIF() ENDIF()
IF (NOT "$ENV{WITH_BZ2}" STREQUAL "") IF (NOT "$ENV{WITH_BZ2}" STREQUAL "")
SET(rocksdb_static_libs ${rocksdb_static_libs} SET(rocksdb_static_libs ${rocksdb_static_libs}
$ENV{WITH_BZ2}/lib/libbz2${PIC_EXT}.a) $ENV{WITH_BZ2}/libbz2${PIC_EXT}.a)
ADD_DEFINITIONS(-DBZIP2) ADD_DEFINITIONS(-DBZIP2)
ELSE()
SET(rocksdb_static_libs ${rocksdb_static_libs} bz2)
ENDIF() ENDIF()
# link ZSTD only if instructed # link ZSTD only if instructed
IF (NOT "$ENV{WITH_ZSTD}" STREQUAL "") IF (NOT "$ENV{WITH_ZSTD}" STREQUAL "")
SET(rocksdb_static_libs ${rocksdb_static_libs} SET(rocksdb_static_libs ${rocksdb_static_libs}
$ENV{WITH_ZSTD}/lib/libzstd${PIC_EXT}.a) $ENV{WITH_ZSTD}/libzstd${PIC_EXT}.a)
ADD_DEFINITIONS(-DZSTD) ADD_DEFINITIONS(-DZSTD)
ENDIF() ENDIF()

View File

@@ -41,7 +41,7 @@ extract_index_stats(
) { ) {
std::vector<Rdb_index_stats> ret; std::vector<Rdb_index_stats> ret;
for (auto fn : files) { for (auto fn : files) {
auto it = props.find(fn); const auto it = props.find(fn);
DBUG_ASSERT(it != props.end()); DBUG_ASSERT(it != props.end());
std::vector<Rdb_index_stats> stats; std::vector<Rdb_index_stats> stats;
Rdb_tbl_prop_coll::read_stats_from_tbl_props(it->second, &stats); Rdb_tbl_prop_coll::read_stats_from_tbl_props(it->second, &stats);
@@ -50,6 +50,19 @@ extract_index_stats(
return ret; return ret;
} }
void Rdb_event_listener::update_index_stats(
const rocksdb::TableProperties& props
) {
DBUG_ASSERT(m_ddl_manager != nullptr);
const auto tbl_props =
std::make_shared<const rocksdb::TableProperties>(props);
std::vector<Rdb_index_stats> stats;
Rdb_tbl_prop_coll::read_stats_from_tbl_props(tbl_props, &stats);
m_ddl_manager->adjust_stats(stats);
}
void Rdb_event_listener::OnCompactionCompleted( void Rdb_event_listener::OnCompactionCompleted(
rocksdb::DB *db, rocksdb::DB *db,
const rocksdb::CompactionJobInfo& ci const rocksdb::CompactionJobInfo& ci
@@ -69,14 +82,14 @@ void Rdb_event_listener::OnFlushCompleted(
const rocksdb::FlushJobInfo& flush_job_info const rocksdb::FlushJobInfo& flush_job_info
) { ) {
DBUG_ASSERT(db != nullptr); DBUG_ASSERT(db != nullptr);
DBUG_ASSERT(m_ddl_manager != nullptr); update_index_stats(flush_job_info.table_properties);
auto tbl_props = std::make_shared<const rocksdb::TableProperties>(
flush_job_info.table_properties);
std::vector<Rdb_index_stats> stats;
Rdb_tbl_prop_coll::read_stats_from_tbl_props(tbl_props, &stats);
m_ddl_manager->adjust_stats(stats);
} }
void Rdb_event_listener::OnExternalFileIngested(
rocksdb::DB* db,
const rocksdb::ExternalFileIngestionInfo& info
) {
DBUG_ASSERT(db != nullptr);
update_index_stats(info.table_properties);
}
} // namespace myrocks } // namespace myrocks

View File

@@ -24,17 +24,26 @@ class Rdb_ddl_manager;
class Rdb_event_listener : public rocksdb::EventListener class Rdb_event_listener : public rocksdb::EventListener
{ {
public: public:
explicit Rdb_event_listener(Rdb_ddl_manager* ddl_manager) : Rdb_event_listener(const Rdb_event_listener&) = delete;
Rdb_event_listener& operator=(const Rdb_event_listener&) = delete;
explicit Rdb_event_listener(Rdb_ddl_manager* const ddl_manager) :
m_ddl_manager(ddl_manager) { m_ddl_manager(ddl_manager) {
} }
void OnCompactionCompleted( void OnCompactionCompleted(
rocksdb::DB *db, const rocksdb::CompactionJobInfo& ci) override; rocksdb::DB* db, const rocksdb::CompactionJobInfo& ci) override;
void OnFlushCompleted( void OnFlushCompleted(
rocksdb::DB* db, const rocksdb::FlushJobInfo& flush_job_info) override; rocksdb::DB* db, const rocksdb::FlushJobInfo& flush_job_info) override;
void OnExternalFileIngested(
rocksdb::DB* db, const rocksdb::ExternalFileIngestionInfo& ingestion_info)
override;
private: private:
Rdb_ddl_manager* m_ddl_manager; Rdb_ddl_manager* m_ddl_manager;
void update_index_stats(
const rocksdb::TableProperties& props);
}; };
} // namespace myrocks } // namespace myrocks

File diff suppressed because it is too large Load Diff

View File

@@ -26,10 +26,17 @@
#include <vector> #include <vector>
/* MySQL header files */ /* MySQL header files */
#include "../storage/xtradb/include/ut0counter.h"
/*
'EMPTY' from field.h conflicts with EMPTY from
/usr/include/x86_64-linux-gnu/bits/utmpx.h
MARIAROCKS_NOT_YET: Sort out #include order so that we don't have to resort
to #undef
*/
#undef EMPTY
#include "./handler.h" /* handler */ #include "./handler.h" /* handler */
#include "./my_global.h" /* ulonglong */ #include "./my_global.h" /* ulonglong */
#include "./sql_string.h" #include "./sql_string.h"
#include "../storage/innobase/include/ut0counter.h"
/* RocksDB header files */ /* RocksDB header files */
#include "rocksdb/cache.h" #include "rocksdb/cache.h"
@@ -62,6 +69,30 @@
namespace myrocks { namespace myrocks {
/*
* class for exporting transaction information for
* information_schema.rocksdb_trx
*/
struct Rdb_trx_info {
std::string name;
ulonglong trx_id;
ulonglong write_count;
ulonglong lock_count;
int timeout_sec;
std::string state;
std::string waiting_key;
ulonglong waiting_cf_id;
int is_replication;
int skip_trx_api;
int read_only;
int deadlock_detect;
int num_ongoing_bulk_load;
ulong thread_id;
std::string query_str;
};
std::vector<Rdb_trx_info> rdb_get_all_trx_info();
/* /*
This is This is
- the name of the default Column Family (the CF which stores indexes which - the name of the default Column Family (the CF which stores indexes which
@@ -160,7 +191,7 @@ const char * const PER_INDEX_CF_NAME = "$per_index_cf";
#define HA_ERR_ROCKSDB_TOO_MANY_LOCKS (HA_ERR_LAST + 3) #define HA_ERR_ROCKSDB_TOO_MANY_LOCKS (HA_ERR_LAST + 3)
#define HA_ERR_ROCKSDB_LAST HA_ERR_ROCKSDB_TOO_MANY_LOCKS #define HA_ERR_ROCKSDB_LAST HA_ERR_ROCKSDB_TOO_MANY_LOCKS
inline bool looks_like_per_index_cf_typo(const char *name) inline bool looks_like_per_index_cf_typo(const char* const name)
{ {
return (name && name[0]=='$' && strcmp(name, PER_INDEX_CF_NAME)); return (name && name[0]=='$' && strcmp(name, PER_INDEX_CF_NAME));
} }
@@ -237,13 +268,19 @@ enum operation_type {
ROWS_MAX ROWS_MAX
}; };
#if defined(HAVE_SCHED_GETCPU)
#define RDB_INDEXER get_sched_indexer_t
#else
#define RDB_INDEXER thread_id_indexer_t
#endif
/* Global statistics struct used inside MyRocks */ /* Global statistics struct used inside MyRocks */
struct st_global_stats { struct st_global_stats {
ib_counter_t<ulonglong, 64> rows[ROWS_MAX]; ib_counter_t<ulonglong, 64, RDB_INDEXER> rows[ROWS_MAX];
// system_rows_ stats are only for system // system_rows_ stats are only for system
// tables. They are not counted in rows_* stats. // tables. They are not counted in rows_* stats.
ib_counter_t<ulonglong, 64> system_rows[ROWS_MAX]; ib_counter_t<ulonglong, 64, RDB_INDEXER> system_rows[ROWS_MAX];
}; };
/* Struct used for exporting status to MySQL */ /* Struct used for exporting status to MySQL */
@@ -270,8 +307,8 @@ namespace std {
{ {
std::size_t operator()(const myrocks::GL_INDEX_ID& gl_index_id) const std::size_t operator()(const myrocks::GL_INDEX_ID& gl_index_id) const
{ {
uint64_t val = ((uint64_t) gl_index_id.cf_id << 32 | const uint64_t val = ((uint64_t) gl_index_id.cf_id << 32 |
(uint64_t) gl_index_id.index_id); (uint64_t) gl_index_id.index_id);
return std::hash<uint64_t>()(val); return std::hash<uint64_t>()(val);
} }
}; };
@@ -296,6 +333,8 @@ class ha_rocksdb: public my_core::handler
/* Whether m_scan_it was created with skip_bloom=true */ /* Whether m_scan_it was created with skip_bloom=true */
bool m_scan_it_skips_bloom; bool m_scan_it_skips_bloom;
const rocksdb::Snapshot *m_scan_it_snapshot;
Rdb_tbl_def *m_tbl_def; Rdb_tbl_def *m_tbl_def;
/* Primary Key encoder from KeyTupleFormat to StorageFormat */ /* Primary Key encoder from KeyTupleFormat to StorageFormat */
@@ -407,42 +446,46 @@ class ha_rocksdb: public my_core::handler
/* SST information used for bulk loading the primary key */ /* SST information used for bulk loading the primary key */
std::shared_ptr<Rdb_sst_info> m_sst_info; std::shared_ptr<Rdb_sst_info> m_sst_info;
Rdb_transaction *m_bulk_load_tx;
/* Mutex to protect finalizing bulk load */
mysql_mutex_t m_bulk_load_mutex;
/* /*
MySQL index number for duplicate key error MySQL index number for duplicate key error
*/ */
int m_dupp_errkey; int m_dupp_errkey;
int create_key_defs(const TABLE *table_arg, Rdb_tbl_def *tbl_def_arg, int create_key_defs(const TABLE* const table_arg,
const TABLE *old_table_arg= nullptr, Rdb_tbl_def* const tbl_def_arg,
const Rdb_tbl_def *old_tbl_def_arg= nullptr) const TABLE* const old_table_arg= nullptr,
const Rdb_tbl_def* const old_tbl_def_arg= nullptr) const
__attribute__((__nonnull__(2, 3), __warn_unused_result__)); __attribute__((__nonnull__(2, 3), __warn_unused_result__));
int secondary_index_read(const int keyno, uchar *buf) int secondary_index_read(const int keyno, uchar* const buf)
__attribute__((__nonnull__, __warn_unused_result__)); __attribute__((__nonnull__, __warn_unused_result__));
void setup_iterator_for_rnd_scan(); void setup_iterator_for_rnd_scan();
void setup_scan_iterator(const std::shared_ptr<const Rdb_key_def>& kd, void setup_scan_iterator(const Rdb_key_def& kd, rocksdb::Slice* const slice)
rocksdb::Slice *slice)
__attribute__((__nonnull__)) __attribute__((__nonnull__))
{ {
setup_scan_iterator(kd, slice, false, false, 0); setup_scan_iterator(kd, slice, false, false, 0);
} }
bool is_ascending(const std::shared_ptr<const Rdb_key_def>& keydef, bool is_ascending(const Rdb_key_def& keydef,
enum ha_rkey_function find_flag) const enum ha_rkey_function find_flag) const
__attribute__((__nonnull__, __warn_unused_result__)); __attribute__((__nonnull__, __warn_unused_result__));
void setup_scan_iterator(const std::shared_ptr<const Rdb_key_def>& kd, void setup_scan_iterator(const Rdb_key_def& kd,
rocksdb::Slice *slice, const bool use_all_keys, rocksdb::Slice *slice, const bool use_all_keys,
const bool is_ascending, const uint eq_cond_len) const bool is_ascending, const uint eq_cond_len)
__attribute__((__nonnull__)); __attribute__((__nonnull__));
void release_scan_iterator(void) void release_scan_iterator(void);
{
delete m_scan_it;
m_scan_it= nullptr;
}
int get_row_by_rowid(uchar *buf, const char *rowid, rocksdb::Status get_for_update(Rdb_transaction* const tx,
rocksdb::ColumnFamilyHandle* const column_family,
const rocksdb::Slice& key,
std::string* const value) const;
int get_row_by_rowid(uchar* const buf, const char* const rowid,
const uint rowid_size) const uint rowid_size)
__attribute__((__nonnull__, __warn_unused_result__)); __attribute__((__nonnull__, __warn_unused_result__));
int get_row_by_rowid(uchar *buf, const uchar *rowid, int get_row_by_rowid(uchar* const buf, const uchar* const rowid,
const uint rowid_size) const uint rowid_size)
__attribute__((__nonnull__, __warn_unused_result__)) __attribute__((__nonnull__, __warn_unused_result__))
{ {
@@ -454,23 +497,23 @@ class ha_rocksdb: public my_core::handler
void load_auto_incr_value(); void load_auto_incr_value();
longlong update_hidden_pk_val(); longlong update_hidden_pk_val();
int load_hidden_pk_value() __attribute__((__warn_unused_result__)); int load_hidden_pk_value() __attribute__((__warn_unused_result__));
int read_hidden_pk_id_from_rowkey(longlong *hidden_pk_id) int read_hidden_pk_id_from_rowkey(longlong* const hidden_pk_id)
__attribute__((__nonnull__, __warn_unused_result__)); __attribute__((__nonnull__, __warn_unused_result__));
bool can_use_single_delete(uint index) bool can_use_single_delete(const uint &index) const
__attribute__((__warn_unused_result__)); __attribute__((__warn_unused_result__));
bool skip_unique_check() __attribute__((__warn_unused_result__)); bool skip_unique_check() const __attribute__((__warn_unused_result__));
#ifdef MARIAROCKS_NOT_YET // MDEV-10975 #ifdef MARIAROCKS_NOT_YET // MDEV-10975
void set_force_skip_unique_check(bool skip) override; void set_force_skip_unique_check(bool skip) override;
#endif #endif
bool commit_in_the_middle() __attribute__((__warn_unused_result__)); bool commit_in_the_middle() __attribute__((__warn_unused_result__));
bool do_bulk_commit(Rdb_transaction *tx) bool do_bulk_commit(Rdb_transaction* const tx)
__attribute__((__nonnull__, __warn_unused_result__)); __attribute__((__nonnull__, __warn_unused_result__));
bool has_hidden_pk(const TABLE* table) bool has_hidden_pk(const TABLE* const table) const
__attribute__((__nonnull__, __warn_unused_result__)); __attribute__((__nonnull__, __warn_unused_result__));
#ifdef MARIAROCKS_NOT_YET #ifdef MARIAROCKS_NOT_YET
void update_row_stats(operation_type type); void update_row_stats(const operation_type &type);
#endif #endif
void set_last_rowkey(const uchar *old_data); void set_last_rowkey(const uchar* const old_data);
/* /*
Array of table->s->fields elements telling how to store fields in the Array of table->s->fields elements telling how to store fields in the
@@ -506,14 +549,15 @@ class ha_rocksdb: public my_core::handler
*/ */
uint m_null_bytes_in_rec; uint m_null_bytes_in_rec;
void get_storage_type(Rdb_field_encoder *encoder, uint kp); void get_storage_type(Rdb_field_encoder* const encoder, const uint &kp);
void setup_field_converters(); void setup_field_converters();
int alloc_key_buffers(const TABLE* table_arg, const Rdb_tbl_def* tbl_def_arg) int alloc_key_buffers(const TABLE* const table_arg,
const Rdb_tbl_def* const tbl_def_arg)
__attribute__((__nonnull__, __warn_unused_result__)); __attribute__((__nonnull__, __warn_unused_result__));
void free_key_buffers(); void free_key_buffers();
// the buffer size should be at least 2*Rdb_key_def::INDEX_NUMBER_SIZE // the buffer size should be at least 2*Rdb_key_def::INDEX_NUMBER_SIZE
rocksdb::Range get_range(int i, uchar buf[]) const; rocksdb::Range get_range(const int &i, uchar buf[]) const;
/* /*
Perf timers for data reads Perf timers for data reads
@@ -536,18 +580,20 @@ public:
Controls whether writes include checksums. This is updated from the session variable Controls whether writes include checksums. This is updated from the session variable
at the start of each query. at the start of each query.
*/ */
bool m_store_checksums; bool m_store_row_debug_checksums;
/* Same as above but for verifying checksums when reading */ /* Same as above but for verifying checksums when reading */
bool m_verify_checksums; bool m_verify_row_debug_checksums;
int m_checksums_pct; int m_checksums_pct;
ha_rocksdb(my_core::handlerton *hton, my_core::TABLE_SHARE *table_arg); ha_rocksdb(my_core::handlerton* const hton,
my_core::TABLE_SHARE* const table_arg);
~ha_rocksdb() ~ha_rocksdb()
{ {
int err __attribute__((__unused__)); int err __attribute__((__unused__));
err= finalize_bulk_load(); err= finalize_bulk_load();
DBUG_ASSERT(err == 0); DBUG_ASSERT(err == 0);
mysql_mutex_destroy(&m_bulk_load_mutex);
} }
/** @brief /** @brief
@@ -617,41 +663,44 @@ public:
return true; return true;
} }
bool should_store_checksums() const bool should_store_row_debug_checksums() const
{ {
return m_store_checksums && (rand() % 100 < m_checksums_pct); return m_store_row_debug_checksums && (rand() % 100 < m_checksums_pct);
} }
int rename_table(const char *from, const char *to) int rename_table(const char* const from, const char* const to)
__attribute__((__nonnull__, __warn_unused_result__)); __attribute__((__nonnull__, __warn_unused_result__));
int convert_record_from_storage_format(const rocksdb::Slice *key, int convert_record_from_storage_format(const rocksdb::Slice* const key,
const rocksdb::Slice *value, const rocksdb::Slice* const value,
uchar *buf) uchar* const buf)
__attribute__((__nonnull__, __warn_unused_result__)); __attribute__((__nonnull__, __warn_unused_result__));
int convert_record_from_storage_format(const rocksdb::Slice *key, int convert_record_from_storage_format(const rocksdb::Slice* const key,
uchar *buf) uchar* const buf)
__attribute__((__nonnull__, __warn_unused_result__)); __attribute__((__nonnull__, __warn_unused_result__));
void convert_record_to_storage_format(const rocksdb::Slice& pk_packed_slice, void convert_record_to_storage_format(const rocksdb::Slice& pk_packed_slice,
Rdb_string_writer *pk_unpack_info, Rdb_string_writer* const pk_unpack_info,
rocksdb::Slice *packed_rec) rocksdb::Slice* const packed_rec)
__attribute__((__nonnull__)); __attribute__((__nonnull__));
static const char* get_key_name(const uint index, const TABLE* table_arg, static const char* get_key_name(const uint index,
const Rdb_tbl_def* tbl_def_arg) const TABLE* const table_arg,
const Rdb_tbl_def* const tbl_def_arg)
__attribute__((__nonnull__, __warn_unused_result__)); __attribute__((__nonnull__, __warn_unused_result__));
static const char* get_key_comment(const uint index, const TABLE* table_arg, static const char* get_key_comment(const uint index,
const Rdb_tbl_def* tbl_def_arg) const TABLE* const table_arg,
const Rdb_tbl_def* const tbl_def_arg)
__attribute__((__nonnull__, __warn_unused_result__)); __attribute__((__nonnull__, __warn_unused_result__));
static bool is_hidden_pk(const uint index, const TABLE* table_arg, static bool is_hidden_pk(const uint index, const TABLE* const table_arg,
const Rdb_tbl_def* tbl_def_arg) const Rdb_tbl_def* const tbl_def_arg)
__attribute__((__nonnull__, __warn_unused_result__)); __attribute__((__nonnull__, __warn_unused_result__));
static uint pk_index(const TABLE* table_arg, const Rdb_tbl_def* tbl_def_arg) static uint pk_index(const TABLE* const table_arg,
const Rdb_tbl_def* const tbl_def_arg)
__attribute__((__nonnull__, __warn_unused_result__)); __attribute__((__nonnull__, __warn_unused_result__));
static bool is_pk(const uint index, const TABLE* table_arg, static bool is_pk(const uint index, const TABLE* table_arg,
@@ -689,52 +738,53 @@ public:
ha_rows estimate_rows_upper_bound() { return HA_POS_ERROR; } ha_rows estimate_rows_upper_bound() { return HA_POS_ERROR; }
/* At the moment, we're ok with default handler::index_init() implementation. */ /* At the moment, we're ok with default handler::index_init() implementation. */
int index_read_map(uchar * buf, const uchar * key, int index_read_map(uchar* const buf, const uchar* const key,
key_part_map keypart_map, key_part_map keypart_map,
enum ha_rkey_function find_flag); enum ha_rkey_function find_flag);
__attribute__((__warn_unused_result__)); __attribute__((__warn_unused_result__));
int index_read_map_impl(uchar * buf, const uchar * key, int index_read_map_impl(uchar* const buf, const uchar* const key,
key_part_map keypart_map, key_part_map keypart_map,
enum ha_rkey_function find_flag, enum ha_rkey_function find_flag,
const key_range *end_key) const key_range* end_key)
__attribute__((__warn_unused_result__));; __attribute__((__warn_unused_result__));;
int index_read_last_map(uchar * buf, const uchar * key, int index_read_last_map(uchar* const buf, const uchar* const key,
key_part_map keypart_map) key_part_map keypart_map)
__attribute__((__warn_unused_result__));; __attribute__((__warn_unused_result__));;
int read_range_first(const key_range *start_key, int read_range_first(const key_range* const start_key,
const key_range *end_key, const key_range* const end_key,
bool eq_range, bool sorted) bool eq_range, bool sorted)
__attribute__((__warn_unused_result__));; __attribute__((__warn_unused_result__));;
virtual double scan_time() { return (double) (stats.records+stats.deleted) / 20.0+10; } virtual double scan_time() { return (double) (stats.records+stats.deleted) / 20.0+10; }
virtual double read_time(uint, uint, ha_rows rows) override; virtual double read_time(uint, uint, ha_rows rows) override;
int open(const char *name, int mode, uint test_if_locked) int open(const char* const name, int mode, uint test_if_locked)
__attribute__((__warn_unused_result__)); __attribute__((__warn_unused_result__));
int close(void) __attribute__((__warn_unused_result__)); int close(void) __attribute__((__warn_unused_result__));
int write_row(uchar *buf) __attribute__((__warn_unused_result__)); int write_row(uchar* const buf) __attribute__((__warn_unused_result__));
int update_row(const uchar *old_data, uchar *new_data) int update_row(const uchar* const old_data, uchar* const new_data)
__attribute__((__warn_unused_result__));
int delete_row(const uchar* const buf)
__attribute__((__warn_unused_result__)); __attribute__((__warn_unused_result__));
int delete_row(const uchar *buf) __attribute__((__warn_unused_result__));
rocksdb::Status delete_or_singledelete(uint index, rocksdb::Status delete_or_singledelete(uint index,
Rdb_transaction *tx, Rdb_transaction* const tx,
rocksdb::ColumnFamilyHandle* cf, rocksdb::ColumnFamilyHandle* const cf,
const rocksdb::Slice& key) const rocksdb::Slice& key)
__attribute__((__warn_unused_result__)); __attribute__((__warn_unused_result__));
int index_next(uchar *buf) __attribute__((__warn_unused_result__)); int index_next(uchar* const buf) __attribute__((__warn_unused_result__));
int index_next_with_direction(uchar *buf, bool move_forward) int index_next_with_direction(uchar* const buf, bool move_forward)
__attribute__((__warn_unused_result__)); __attribute__((__warn_unused_result__));
int index_prev(uchar *buf) __attribute__((__warn_unused_result__)); int index_prev(uchar* const buf) __attribute__((__warn_unused_result__));
int index_first(uchar *buf) __attribute__((__warn_unused_result__)); int index_first(uchar* const buf) __attribute__((__warn_unused_result__));
int index_last(uchar *buf) __attribute__((__warn_unused_result__)); int index_last(uchar* const buf) __attribute__((__warn_unused_result__));
class Item* idx_cond_push(uint keyno, class Item* idx_cond); class Item* idx_cond_push(uint keyno, class Item* const idx_cond);
/* /*
Default implementation from cancel_pushed_idx_cond() suits us Default implementation from cancel_pushed_idx_cond() suits us
*/ */
@@ -761,31 +811,32 @@ private:
bool skip_unique_check; bool skip_unique_check;
}; };
int create_cfs(const TABLE *table_arg, Rdb_tbl_def *tbl_def_arg, int create_cfs(const TABLE* const table_arg, Rdb_tbl_def* const tbl_def_arg,
std::array<struct key_def_cf_info, MAX_INDEXES + 1>* cfs); std::array<struct key_def_cf_info, MAX_INDEXES + 1>* const cfs) const;
__attribute__((__nonnull__, __warn_unused_result__)); __attribute__((__nonnull__, __warn_unused_result__));
int create_key_def(const TABLE *table_arg, uint i, int create_key_def(const TABLE* const table_arg, const uint &i,
const Rdb_tbl_def* tbl_def_arg, const Rdb_tbl_def* const tbl_def_arg,
std::shared_ptr<Rdb_key_def>* new_key_def, std::shared_ptr<Rdb_key_def>* const new_key_def,
const struct key_def_cf_info& cf_info); const struct key_def_cf_info& cf_info) const;
__attribute__((__nonnull__, __warn_unused_result__)); __attribute__((__nonnull__, __warn_unused_result__));
int create_inplace_key_defs(const TABLE *table_arg, int create_inplace_key_defs(const TABLE* const table_arg,
Rdb_tbl_def *tbl_def_arg, Rdb_tbl_def* vtbl_def_arg,
const TABLE *old_table_arg, const TABLE* const old_table_arg,
const Rdb_tbl_def *old_tbl_def_arg, const Rdb_tbl_def* const old_tbl_def_arg,
const std::array<key_def_cf_info, MAX_INDEXES + 1>& cfs); const std::array<key_def_cf_info, MAX_INDEXES + 1>& cfs) const;
__attribute__((__nonnull__, __warn_unused_result__)); __attribute__((__nonnull__, __warn_unused_result__));
std::unordered_map<std::string, uint> get_old_key_positions( std::unordered_map<std::string, uint> get_old_key_positions(
const TABLE* table_arg, const TABLE* table_arg,
const Rdb_tbl_def* tbl_def_arg, const Rdb_tbl_def* tbl_def_arg,
const TABLE* old_table_arg, const TABLE* old_table_arg,
const Rdb_tbl_def* old_tbl_def_arg) const Rdb_tbl_def* old_tbl_def_arg) const
__attribute__((__nonnull__)); __attribute__((__nonnull__));
int compare_key_parts(const KEY* old_key, const KEY* new_key); int compare_key_parts(const KEY* const old_key,
const KEY* const new_key) const;
__attribute__((__nonnull__, __warn_unused_result__)); __attribute__((__nonnull__, __warn_unused_result__));
int index_first_intern(uchar *buf) int index_first_intern(uchar *buf)
@@ -793,89 +844,99 @@ private:
int index_last_intern(uchar *buf) int index_last_intern(uchar *buf)
__attribute__((__nonnull__, __warn_unused_result__)); __attribute__((__nonnull__, __warn_unused_result__));
enum icp_result check_index_cond(); enum icp_result check_index_cond() const;
int find_icp_matching_index_rec(bool move_forward, uchar *buf) int find_icp_matching_index_rec(const bool &move_forward, uchar* const buf)
__attribute__((__nonnull__, __warn_unused_result__)); __attribute__((__nonnull__, __warn_unused_result__));
void calc_updated_indexes(); void calc_updated_indexes();
int update_write_row(const uchar *old_data, const uchar *new_data, int update_write_row(const uchar* const old_data, const uchar* const new_data,
const bool skip_unique_check) const bool skip_unique_check)
__attribute__((__warn_unused_result__)); __attribute__((__warn_unused_result__));
int get_pk_for_update(struct update_row_info* row_info); int get_pk_for_update(struct update_row_info* const row_info);
int check_and_lock_unique_pk(uint key_id, int check_and_lock_unique_pk(const uint &key_id,
const struct update_row_info& row_info, const struct update_row_info& row_info,
bool* found, bool* pk_changed) bool* const found, bool* const pk_changed)
__attribute__((__warn_unused_result__)); __attribute__((__warn_unused_result__));
int check_and_lock_sk(uint key_id, const struct update_row_info& row_info, int check_and_lock_sk(const uint &key_id,
bool* found) const const struct update_row_info& row_info,
bool* const found) const
__attribute__((__warn_unused_result__)); __attribute__((__warn_unused_result__));
int check_uniqueness_and_lock(const struct update_row_info& row_info, int check_uniqueness_and_lock(const struct update_row_info& row_info,
bool* pk_changed) bool* const pk_changed)
__attribute__((__warn_unused_result__)); __attribute__((__warn_unused_result__));
bool over_bulk_load_threshold(int* err) bool over_bulk_load_threshold(int* err)
__attribute__((__warn_unused_result__)); __attribute__((__warn_unused_result__));
int bulk_load_key(Rdb_transaction* tx, int bulk_load_key(Rdb_transaction* const tx,
const std::shared_ptr<const Rdb_key_def>& kd, const Rdb_key_def& kd,
const rocksdb::Slice& key, const rocksdb::Slice& key,
const rocksdb::Slice& value) const rocksdb::Slice& value)
__attribute__((__nonnull__, __warn_unused_result__)); __attribute__((__nonnull__, __warn_unused_result__));
int update_pk(const std::shared_ptr<const Rdb_key_def>& kd, int update_pk(const Rdb_key_def& kd,
const struct update_row_info& row_info, const struct update_row_info& row_info,
bool pk_changed) const bool &pk_changed)
__attribute__((__warn_unused_result__)); __attribute__((__warn_unused_result__));
int update_sk(const TABLE* table_arg, int update_sk(const TABLE* const table_arg,
const std::shared_ptr<const Rdb_key_def>& kd, const Rdb_key_def& kd,
const struct update_row_info& row_info) const struct update_row_info& row_info)
__attribute__((__warn_unused_result__)); __attribute__((__warn_unused_result__));
int update_indexes(const struct update_row_info& row_info, bool pk_changed) int update_indexes(const struct update_row_info& row_info,
const bool &pk_changed)
__attribute__((__warn_unused_result__)); __attribute__((__warn_unused_result__));
int read_key_exact(const std::shared_ptr<const Rdb_key_def>& kd, int read_key_exact(const Rdb_key_def& kd,
rocksdb::Iterator* iter, bool using_full_key, rocksdb::Iterator* const iter, const bool &using_full_key,
const rocksdb::Slice& key_slice) const const rocksdb::Slice& key_slice) const
__attribute__((__nonnull__, __warn_unused_result__)); __attribute__((__nonnull__, __warn_unused_result__));
int read_before_key(const std::shared_ptr<const Rdb_key_def>& kd, int read_before_key(const Rdb_key_def& kd,
bool using_full_key, const rocksdb::Slice& key_slice) const bool &using_full_key,
const rocksdb::Slice& key_slice)
__attribute__((__nonnull__, __warn_unused_result__)); __attribute__((__nonnull__, __warn_unused_result__));
int read_after_key(const std::shared_ptr<const Rdb_key_def>& kd, int read_after_key(const Rdb_key_def& kd,
bool using_full_key, const rocksdb::Slice& key_slice) const bool &using_full_key,
const rocksdb::Slice& key_slice)
__attribute__((__nonnull__, __warn_unused_result__)); __attribute__((__nonnull__, __warn_unused_result__));
int position_to_correct_key(const std::shared_ptr<const Rdb_key_def>& kd, int position_to_correct_key(const Rdb_key_def& kd,
enum ha_rkey_function find_flag, const enum ha_rkey_function &find_flag,
bool full_key_match, const uchar* key, const bool &full_key_match,
key_part_map keypart_map, const uchar* const key,
const key_part_map &keypart_map,
const rocksdb::Slice& key_slice, const rocksdb::Slice& key_slice,
bool* move_forward) bool* const move_forward)
__attribute__((__warn_unused_result__)); __attribute__((__warn_unused_result__));
int read_row_from_primary_key(uchar* buf) int read_row_from_primary_key(uchar* const buf)
__attribute__((__nonnull__, __warn_unused_result__)); __attribute__((__nonnull__, __warn_unused_result__));
int read_row_from_secondary_key(uchar* buf, int read_row_from_secondary_key(uchar* const buf,
const std::shared_ptr<const Rdb_key_def>& kd, const Rdb_key_def& kd,
bool move_forward) bool move_forward)
__attribute__((__nonnull__, __warn_unused_result__)); __attribute__((__nonnull__, __warn_unused_result__));
int try_keyonly_read_from_sk(uchar* buf, int try_keyonly_read_from_sk(uchar* buf,
const std::shared_ptr<const Rdb_key_def>& kd, const Rdb_key_def& kd,
const rocksdb::Slice& key, const rocksdb::Slice& key,
const rocksdb::Slice& value, const rocksdb::Slice& value,
uint rowid_size) uint rowid_size)
__attribute__((__nonnull__, __warn_unused_result__)); __attribute__((__nonnull__, __warn_unused_result__));
int calc_eq_cond_len(const std::shared_ptr<const Rdb_key_def>& kd, int calc_eq_cond_len(const Rdb_key_def& kd,
enum ha_rkey_function find_flag, const enum ha_rkey_function &find_flag,
const rocksdb::Slice& slice, int bytes_changed_by_succ, const rocksdb::Slice& slice,
const key_range *end_key, uint* end_key_packed_size) const int &bytes_changed_by_succ,
const key_range* const end_key,
uint* const end_key_packed_size)
__attribute__((__warn_unused_result__)); __attribute__((__warn_unused_result__));
Rdb_tbl_def* get_table_if_exists(const char* tablename) Rdb_tbl_def* get_table_if_exists(const char* const tablename)
__attribute__((__nonnull__, __warn_unused_result__)); __attribute__((__nonnull__, __warn_unused_result__));
void read_thd_vars(THD *thd) void read_thd_vars(THD* const thd)
__attribute__((__nonnull__)); __attribute__((__nonnull__));
bool contains_foreign_key(THD* thd) const char* thd_rocksdb_tmpdir()
__attribute__((__nonnull__, __warn_unused_result__)); __attribute__((__nonnull__, __warn_unused_result__));
int inplace_populate_sk(const TABLE* table_arg, bool contains_foreign_key(THD* const thd)
__attribute__((__nonnull__, __warn_unused_result__));
int inplace_populate_sk(const TABLE* const table_arg,
const std::unordered_set<std::shared_ptr<Rdb_key_def>>& indexes); const std::unordered_set<std::shared_ptr<Rdb_key_def>>& indexes);
public: public:
@@ -895,12 +956,13 @@ public:
int rnd_init(bool scan) __attribute__((__warn_unused_result__)); int rnd_init(bool scan) __attribute__((__warn_unused_result__));
int rnd_end() __attribute__((__warn_unused_result__)); int rnd_end() __attribute__((__warn_unused_result__));
int rnd_next(uchar *buf) __attribute__((__warn_unused_result__)); int rnd_next(uchar* const buf) __attribute__((__warn_unused_result__));
int rnd_next_with_direction(uchar *buf, bool move_forward) int rnd_next_with_direction(uchar* const buf, bool move_forward)
__attribute__((__warn_unused_result__)); __attribute__((__warn_unused_result__));
int rnd_pos(uchar *buf, uchar *pos) __attribute__((__warn_unused_result__)); int rnd_pos(uchar* const buf, uchar* const pos)
void position(const uchar *record); __attribute__((__warn_unused_result__));
void position(const uchar* const record);
int info(uint) override; int info(uint) override;
/* This function will always return success, therefore no annotation related /* This function will always return success, therefore no annotation related
@@ -908,9 +970,9 @@ public:
* required by the interface. */ * required by the interface. */
int extra(enum ha_extra_function operation); int extra(enum ha_extra_function operation);
int start_stmt(THD *thd, thr_lock_type lock_type) int start_stmt(THD* const thd, thr_lock_type lock_type)
__attribute__((__warn_unused_result__)); __attribute__((__warn_unused_result__));
int external_lock(THD *thd, int lock_type) int external_lock(THD* const thd, int lock_type)
__attribute__((__warn_unused_result__)); __attribute__((__warn_unused_result__));
int delete_all_rows() __attribute__((__warn_unused_result__)); int delete_all_rows() __attribute__((__warn_unused_result__));
int truncate() __attribute__((__warn_unused_result__)); int truncate() __attribute__((__warn_unused_result__));
@@ -922,66 +984,69 @@ public:
return 0; return 0;
} }
int check(THD* thd, HA_CHECK_OPT* check_opt) int check(THD* const thd, HA_CHECK_OPT* const check_opt)
__attribute__((__warn_unused_result__)); __attribute__((__warn_unused_result__));
void remove_rows(Rdb_tbl_def *tbl); void remove_rows(Rdb_tbl_def* const tbl);
ha_rows records_in_range(uint inx, key_range *min_key, ha_rows records_in_range(uint inx, key_range* const min_key,
key_range *max_key) key_range* const max_key)
__attribute__((__warn_unused_result__)); __attribute__((__warn_unused_result__));
int delete_table(const char *from) __attribute__((__warn_unused_result__)); int delete_table(const char* const from) __attribute__((__warn_unused_result__));
int create(const char *name, TABLE *form, HA_CREATE_INFO *create_info) int create(const char* const name, TABLE* const form,
HA_CREATE_INFO* const create_info)
__attribute__((__warn_unused_result__)); __attribute__((__warn_unused_result__));
bool check_if_incompatible_data(HA_CREATE_INFO *info, bool check_if_incompatible_data(HA_CREATE_INFO* const info,
uint table_changes) uint table_changes)
__attribute__((__warn_unused_result__)); __attribute__((__warn_unused_result__));
THR_LOCK_DATA **store_lock(THD *thd, THR_LOCK_DATA **to, THR_LOCK_DATA **store_lock(THD* const thd, THR_LOCK_DATA **to,
enum thr_lock_type lock_type) enum thr_lock_type lock_type)
__attribute__((__warn_unused_result__)); __attribute__((__warn_unused_result__));
my_bool register_query_cache_table(THD *thd, char *table_key, my_bool register_query_cache_table(THD* const thd, char* const table_key,
uint key_length, uint key_length,
qc_engine_callback qc_engine_callback* const engine_callback,
*engine_callback, ulonglong* const engine_data)
ulonglong *engine_data)
{ {
/* Currently, we don't support query cache */ /* Currently, we don't support query cache */
return FALSE; return FALSE;
} }
bool get_error_message(const int error, String *buf) bool get_error_message(const int error, String* const buf)
__attribute__((__nonnull__)); __attribute__((__nonnull__));
void get_auto_increment(ulonglong offset, ulonglong increment, void get_auto_increment(ulonglong offset, ulonglong increment,
ulonglong nb_desired_values, ulonglong nb_desired_values,
ulonglong *first_value, ulonglong* const first_value,
ulonglong *nb_reserved_values); ulonglong* const nb_reserved_values);
void update_create_info(HA_CREATE_INFO *create_info); void update_create_info(HA_CREATE_INFO* const create_info);
int optimize(THD *thd, HA_CHECK_OPT *check_opt) int optimize(THD* const thd, HA_CHECK_OPT* const check_opt)
__attribute__((__warn_unused_result__)); __attribute__((__warn_unused_result__));
int analyze(THD* thd, HA_CHECK_OPT* check_opt) int analyze(THD* const thd, HA_CHECK_OPT* const check_opt)
__attribute__((__warn_unused_result__));
int calculate_stats(const TABLE* const table_arg, THD* const thd,
HA_CHECK_OPT* const check_opt)
__attribute__((__warn_unused_result__)); __attribute__((__warn_unused_result__));
enum_alter_inplace_result check_if_supported_inplace_alter( enum_alter_inplace_result check_if_supported_inplace_alter(
TABLE *altered_table, TABLE *altered_table,
my_core::Alter_inplace_info *ha_alter_info) override; my_core::Alter_inplace_info* const ha_alter_info) override;
bool prepare_inplace_alter_table(TABLE *altered_table, bool prepare_inplace_alter_table(TABLE* const altered_table,
my_core::Alter_inplace_info *ha_alter_info); my_core::Alter_inplace_info* const ha_alter_info);
bool inplace_alter_table(TABLE *altered_table, bool inplace_alter_table(TABLE* const altered_table,
my_core::Alter_inplace_info *ha_alter_info); my_core::Alter_inplace_info* const ha_alter_info);
bool commit_inplace_alter_table(TABLE *altered_table, bool commit_inplace_alter_table(TABLE* const altered_table,
my_core::Alter_inplace_info *ha_alter_info, my_core::Alter_inplace_info* const ha_alter_info,
bool commit); bool commit);
int finalize_bulk_load() __attribute__((__warn_unused_result__)); int finalize_bulk_load() __attribute__((__warn_unused_result__));
#ifdef MARIAROCKS_NOT_YET // MDEV-10976 #ifdef MARIAROCKS_NOT_YET // MDEV-10976
void set_use_read_free_rpl(const char* whitelist); void set_use_read_free_rpl(const char* const whitelist);
#endif #endif
void set_skip_unique_check_tables(const char* whitelist); void set_skip_unique_check_tables(const char* const whitelist);
#ifdef MARIAROCKS_NOT_YET // MDEV-10976 #ifdef MARIAROCKS_NOT_YET // MDEV-10976
public: public:
@@ -1007,10 +1072,10 @@ public:
struct Rdb_inplace_alter_ctx : public my_core::inplace_alter_handler_ctx struct Rdb_inplace_alter_ctx : public my_core::inplace_alter_handler_ctx
{ {
/* The new table definition */ /* The new table definition */
Rdb_tbl_def* m_new_tdef; Rdb_tbl_def* const m_new_tdef;
/* Stores the original key definitions */ /* Stores the original key definitions */
std::shared_ptr<Rdb_key_def>* m_old_key_descr; std::shared_ptr<Rdb_key_def>* const m_old_key_descr;
/* Stores the new key definitions */ /* Stores the new key definitions */
std::shared_ptr<Rdb_key_def>* m_new_key_descr; std::shared_ptr<Rdb_key_def>* m_new_key_descr;
@@ -1022,10 +1087,10 @@ struct Rdb_inplace_alter_ctx : public my_core::inplace_alter_handler_ctx
const uint m_new_n_keys; const uint m_new_n_keys;
/* Stores the added key glids */ /* Stores the added key glids */
std::unordered_set<std::shared_ptr<Rdb_key_def>> m_added_indexes; const std::unordered_set<std::shared_ptr<Rdb_key_def>> m_added_indexes;
/* Stores the dropped key glids */ /* Stores the dropped key glids */
std::unordered_set<GL_INDEX_ID> m_dropped_index_ids; const std::unordered_set<GL_INDEX_ID> m_dropped_index_ids;
/* Stores number of keys to add */ /* Stores number of keys to add */
const uint m_n_added_keys; const uint m_n_added_keys;

View File

@@ -24,13 +24,15 @@
/* RocksDB includes */ /* RocksDB includes */
#include "rocksdb/table.h" #include "rocksdb/table.h"
#include "rocksdb/utilities/transaction_db.h"
namespace myrocks { namespace myrocks {
enum RDB_IO_ERROR_TYPE { enum RDB_IO_ERROR_TYPE {
RDB_IO_ERROR_TX_COMMIT, RDB_IO_ERROR_TX_COMMIT,
RDB_IO_ERROR_DICT_COMMIT, RDB_IO_ERROR_DICT_COMMIT,
RDB_IO_ERROR_BG_THREAD RDB_IO_ERROR_BG_THREAD,
RDB_IO_ERROR_GENERAL
}; };
void rdb_handle_io_error(rocksdb::Status status, RDB_IO_ERROR_TYPE err_type); void rdb_handle_io_error(rocksdb::Status status, RDB_IO_ERROR_TYPE err_type);
@@ -58,7 +60,7 @@ void rdb_queue_save_stats_request();
Access to singleton objects. Access to singleton objects.
*/ */
rocksdb::DB *rdb_get_rocksdb_db(); rocksdb::TransactionDB *rdb_get_rocksdb_db();
class Rdb_cf_manager; class Rdb_cf_manager;
Rdb_cf_manager& rdb_get_cf_manager(); Rdb_cf_manager& rdb_get_cf_manager();

View File

@@ -61,7 +61,7 @@ class Rdb_logger : public rocksdb::Logger
Logv(rocksdb::InfoLogLevel::INFO_LEVEL, format, ap); Logv(rocksdb::InfoLogLevel::INFO_LEVEL, format, ap);
} }
void SetRocksDBLogger(std::shared_ptr<rocksdb::Logger> logger) void SetRocksDBLogger(const std::shared_ptr<rocksdb::Logger> logger)
{ {
m_logger = logger; m_logger = logger;
} }

View File

@@ -59,8 +59,9 @@ eval SET SESSION TRANSACTION ISOLATION LEVEL $isolation_level;
UPDATE t0 SET VALUE=VALUE+1 WHERE id=190000; UPDATE t0 SET VALUE=VALUE+1 WHERE id=190000;
connection con1; connection con1;
--error ER_LOCK_DEADLOCK --error 0,ER_LOCK_DEADLOCK
reap; reap;
--echo ERROR: $mysql_errno
connection default; connection default;
disconnect con1; disconnect con1;

View File

@@ -64,8 +64,9 @@ DELETE FROM t0 WHERE id=190000;
COMMIT; COMMIT;
connection con1; connection con1;
--error ER_LOCK_DEADLOCK --error 0,ER_LOCK_DEADLOCK
reap; reap;
--echo ERROR: $mysql_errno
COMMIT; COMMIT;
connection default; connection default;

View File

@@ -64,8 +64,9 @@ UPDATE t0 SET id=200001 WHERE id=190000;
COMMIT; COMMIT;
connection con1; connection con1;
--error ER_LOCK_DEADLOCK --error 0,ER_LOCK_DEADLOCK
reap; reap;
--echo ERROR: $mysql_errno
COMMIT; COMMIT;
connection default; connection default;

View File

@@ -0,0 +1,44 @@
# Disable for valgrind because this takes too long
DROP DATABASE IF EXISTS mysqlslap;
CREATE DATABASE mysqlslap;
USE mysqlslap;
CREATE TABLE t1(id BIGINT AUTO_INCREMENT, value BIGINT, PRIMARY KEY(id)) ENGINE=rocksdb;
# 2PC enabled, MyRocks durability enabled
SET GLOBAL rocksdb_disable_2pc=0;
SET GLOBAL rocksdb_write_sync=1;
## 2PC + durability + single thread
select variable_value into @c from information_schema.global_status where variable_name='rocksdb_wal_group_syncs';
select case when variable_value-@c = 1000 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_wal_group_syncs';
case when variable_value-@c = 1000 then 'true' else 'false' end
true
## 2PC + durability + group commit
select variable_value into @c from information_schema.global_status where variable_name='rocksdb_wal_group_syncs';
select case when variable_value-@c > 0 and variable_value-@c < 10000 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_wal_group_syncs';
case when variable_value-@c > 0 and variable_value-@c < 10000 then 'true' else 'false' end
true
# 2PC enabled, MyRocks durability disabled
SET GLOBAL rocksdb_disable_2pc=0;
SET GLOBAL rocksdb_write_sync=0;
select variable_value into @c from information_schema.global_status where variable_name='rocksdb_wal_group_syncs';
select case when variable_value-@c = 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_wal_group_syncs';
case when variable_value-@c = 0 then 'true' else 'false' end
true
select variable_value into @c from information_schema.global_status where variable_name='rocksdb_wal_group_syncs';
select case when variable_value-@c = 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_wal_group_syncs';
case when variable_value-@c = 0 then 'true' else 'false' end
true
# 2PC disabled, MyRocks durability enabled
SET GLOBAL rocksdb_disable_2pc=1;
SET GLOBAL rocksdb_write_sync=1;
select variable_value into @c from information_schema.global_status where variable_name='rocksdb_wal_group_syncs';
select case when variable_value-@c = 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_wal_group_syncs';
case when variable_value-@c = 0 then 'true' else 'false' end
true
select variable_value into @c from information_schema.global_status where variable_name='rocksdb_wal_group_syncs';
select case when variable_value-@c = 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_wal_group_syncs';
case when variable_value-@c = 0 then 'true' else 'false' end
true
SET GLOBAL rocksdb_disable_2pc=1;
SET GLOBAL rocksdb_write_sync=0;
DROP TABLE t1;
DROP DATABASE mysqlslap;

View File

@@ -276,101 +276,6 @@ SELECT COUNT(*) FROM t1;
COUNT(*) COUNT(*)
100 100
DROP TABLE t1; DROP TABLE t1;
CREATE TABLE t1 (a INT, b INT, KEY ka(a), KEY kab(a,b)) ENGINE=RocksDB;
INSERT INTO t1 (a, b) VALUES (1, 5);
INSERT INTO t1 (a, b) VALUES (2, 6);
INSERT INTO t1 (a, b) VALUES (3, 7);
# crash_during_online_index_creation
flush logs;
SET SESSION debug_dbug="+d,crash_during_online_index_creation";
ALTER TABLE t1 ADD INDEX kb(b), ALGORITHM=INPLACE;
ERROR HY000: Lost connection to MySQL server during query
SET SESSION debug_dbug="-d,crash_during_online_index_creation";
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
`a` int(11) DEFAULT NULL,
`b` int(11) DEFAULT NULL,
KEY `ka` (`a`),
KEY `kab` (`a`,`b`)
) ENGINE=ROCKSDB DEFAULT CHARSET=latin1
CHECK TABLE t1;
Table Op Msg_type Msg_text
test.t1 check status OK
DROP TABLE t1;
CREATE TABLE t1 (i INT, j INT, k INT, PRIMARY KEY (i), KEY(j)) ENGINE = ROCKSDB PARTITION BY KEY(i) PARTITIONS 4;
# crash_during_index_creation_partition
flush logs;
SET SESSION debug_dbug="+d,crash_during_index_creation_partition";
ALTER TABLE t1 ADD INDEX kij(i,j), ALGORITHM=INPLACE;
ERROR HY000: Lost connection to MySQL server during query
SET SESSION debug_dbug="-d,crash_during_index_creation_partition";
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
`i` int(11) NOT NULL,
`j` int(11) DEFAULT NULL,
`k` int(11) DEFAULT NULL,
PRIMARY KEY (`i`),
KEY `j` (`j`)
) ENGINE=ROCKSDB DEFAULT CHARSET=latin1
/*!50100 PARTITION BY KEY (i)
PARTITIONS 4 */
ALTER TABLE t1 ADD INDEX kij(i,j), ALGORITHM=INPLACE;
SELECT * FROM t1 ORDER BY i LIMIT 10;
i j k
1 1 1
2 2 2
3 3 3
4 4 4
5 5 5
6 6 6
7 7 7
8 8 8
9 9 9
10 10 10
SELECT COUNT(*) FROM t1;
COUNT(*)
100
DROP TABLE t1;
CREATE TABLE t1 (i INT, j INT, k INT, PRIMARY KEY (i), KEY(j)) ENGINE = ROCKSDB PARTITION BY KEY(i) PARTITIONS 4;
# crash_during_index_creation_partition
flush logs;
SET SESSION debug_dbug="+d,myrocks_simulate_index_create_rollback";
# expected assertion failure from sql layer here for alter rollback
call mtr.add_suppression("Assertion `0' failed.");
call mtr.add_suppression("Attempting backtrace. You can use the following information to find out");
ALTER TABLE t1 ADD INDEX kij(i,j), ALGORITHM=INPLACE;
ERROR HY000: Lost connection to MySQL server during query
SET SESSION debug_dbug="-d,myrocks_simulate_index_create_rollback";
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
`i` int(11) NOT NULL,
`j` int(11) DEFAULT NULL,
`k` int(11) DEFAULT NULL,
PRIMARY KEY (`i`),
KEY `j` (`j`)
) ENGINE=ROCKSDB DEFAULT CHARSET=latin1
/*!50100 PARTITION BY KEY (i)
PARTITIONS 4 */
ALTER TABLE t1 ADD INDEX kij(i,j), ALGORITHM=INPLACE;
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
`i` int(11) NOT NULL,
`j` int(11) DEFAULT NULL,
`k` int(11) DEFAULT NULL,
PRIMARY KEY (`i`),
KEY `j` (`j`),
KEY `kij` (`i`,`j`)
) ENGINE=ROCKSDB DEFAULT CHARSET=latin1
/*!50100 PARTITION BY KEY (i)
PARTITIONS 4 */
SELECT COUNT(*) FROM t1;
COUNT(*)
100
DROP TABLE t1;
set @tmp_rocksdb_strict_collation_check= @@rocksdb_strict_collation_check; set @tmp_rocksdb_strict_collation_check= @@rocksdb_strict_collation_check;
set global rocksdb_strict_collation_check=1; set global rocksdb_strict_collation_check=1;
CREATE TABLE t1 (a INT, b TEXT); CREATE TABLE t1 (a INT, b TEXT);
@@ -379,3 +284,127 @@ ERROR HY000: Unsupported collation on string indexed column test.t1.b Use binary
ALTER TABLE t1 ADD PRIMARY KEY(a); ALTER TABLE t1 ADD PRIMARY KEY(a);
DROP TABLE t1; DROP TABLE t1;
set global rocksdb_strict_collation_check= @tmp_rocksdb_strict_collation_check; set global rocksdb_strict_collation_check= @tmp_rocksdb_strict_collation_check;
set global rocksdb_bulk_load=1;
# Establish connection con1 (user=root)
connect con1,localhost,root,,;
# Switch to connection con1
connection con1;
show global variables like 'rocksdb_bulk_load';
Variable_name Value
rocksdb_bulk_load ON
show session variables like 'rocksdb_bulk_load';
Variable_name Value
rocksdb_bulk_load ON
CREATE TABLE t1 (i INT, j INT, PRIMARY KEY (i)) ENGINE = ROCKSDB;
INSERT INTO t1 VALUES (1,1);
# Disconnecting on con1
disconnect con1;
# Establish connection con2 (user=root)
connect con2,localhost,root,,;
# Switch to connection con2
connection con2;
ALTER TABLE t1 ADD INDEX kj(j), ALGORITHM=INPLACE;
SELECT COUNT(*) FROM t1 FORCE INDEX(PRIMARY);
COUNT(*)
1
SELECT COUNT(*) FROM t1 FORCE INDEX(kj);
COUNT(*)
1
DROP TABLE t1;
disconnect con2;
# Establish connection con1 (user=root)
connect con1,localhost,root,,;
# Establish connection con2 (user=root)
connect con2,localhost,root,,;
# Switch to connection con1
connection con1;
CREATE TABLE t1 (i INT, j INT, PRIMARY KEY (i)) ENGINE = ROCKSDB;
set rocksdb_bulk_load=1;
INSERT INTO t1 VALUES (1,1);
# Switch to connection con2
connection con2;
SELECT COUNT(*) FROM t1 FORCE INDEX(PRIMARY);
COUNT(*)
0
ALTER TABLE t1 ADD INDEX kj(j), ALGORITHM=INPLACE;
SELECT COUNT(*) FROM t1 FORCE INDEX(PRIMARY);
COUNT(*)
1
SELECT COUNT(*) FROM t1 FORCE INDEX(kj);
COUNT(*)
1
set global rocksdb_bulk_load=0;
DROP TABLE t1;
connection default;
SET @prior_rocksdb_merge_combine_read_size= @@rocksdb_merge_combine_read_size;
SET @prior_rocksdb_strict_collation_check= @@rocksdb_strict_collation_check;
SET @prior_rocksdb_merge_buf_size = @@rocksdb_merge_buf_size;
SET global rocksdb_strict_collation_check = off;
SET session rocksdb_merge_combine_read_size = 566;
SET session rocksdb_merge_buf_size = 336;
show variables like '%rocksdb_bulk_load%';
Variable_name Value
rocksdb_bulk_load OFF
rocksdb_bulk_load_size 1000
CREATE TABLE t1 (a VARCHAR(80)) ENGINE=RocksDB;
INSERT INTO t1 (a) VALUES (REPEAT("a", 80));
INSERT INTO t1 (a) VALUES (REPEAT("a", 80));
INSERT INTO t1 (a) VALUES (REPEAT("a", 80));
INSERT INTO t1 (a) VALUES (REPEAT("a", 80));
ALTER TABLE t1 ADD INDEX ka(a), ALGORITHM=INPLACE;
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
`a` varchar(80) DEFAULT NULL,
KEY `ka` (`a`)
) ENGINE=ROCKSDB DEFAULT CHARSET=latin1
CHECK TABLE t1;
Table Op Msg_type Msg_text
test.t1 check status OK
SELECT * FROM t1 FORCE INDEX(ka) WHERE a > "";
a
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
DROP TABLE t1;
SET session rocksdb_merge_buf_size = @prior_rocksdb_merge_buf_size;
SET session rocksdb_merge_combine_read_size = @prior_rocksdb_merge_combine_read_size;
SET global rocksdb_strict_collation_check = @prior_rocksdb_strict_collation_check;
CREATE TABLE t1 (i INT, j INT, PRIMARY KEY (i)) ENGINE = ROCKSDB;
set global rocksdb_force_flush_memtable_now=1;
ALTER TABLE t1 ADD INDEX kj(j), ALGORITHM=INPLACE;
larger
1
larger
1
Table Op Msg_type Msg_text
test.t1 analyze status OK
larger
1
larger
1
Table Op Msg_type Msg_text
test.t1 analyze status OK
Table Op Msg_type Msg_text
test.t1 analyze status OK
Table Op Msg_type Msg_text
test.t1 analyze status OK
Table Op Msg_type Msg_text
test.t1 analyze status OK
Table Op Msg_type Msg_text
test.t1 analyze status OK
Table Op Msg_type Msg_text
test.t1 analyze status OK
Table Op Msg_type Msg_text
test.t1 analyze status OK
Table Op Msg_type Msg_text
test.t1 analyze status OK
Table Op Msg_type Msg_text
test.t1 analyze status OK
Table Op Msg_type Msg_text
test.t1 analyze status OK
select 1300 < 1300 * 1.5 as "same";
same
1
DROP TABLE t1;

View File

@@ -0,0 +1,96 @@
drop table if exists t1;
CREATE TABLE t1 (a INT, b INT, KEY ka(a), KEY kab(a,b)) ENGINE=RocksDB;
INSERT INTO t1 (a, b) VALUES (1, 5);
INSERT INTO t1 (a, b) VALUES (2, 6);
INSERT INTO t1 (a, b) VALUES (3, 7);
# crash_during_online_index_creation
flush logs;
SET SESSION debug="+d,crash_during_online_index_creation";
ALTER TABLE t1 ADD INDEX kb(b), ALGORITHM=INPLACE;
ERROR HY000: Lost connection to MySQL server during query
SET SESSION debug="-d,crash_during_online_index_creation";
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
`a` int(11) DEFAULT NULL,
`b` int(11) DEFAULT NULL,
KEY `ka` (`a`),
KEY `kab` (`a`,`b`)
) ENGINE=ROCKSDB DEFAULT CHARSET=latin1
CHECK TABLE t1;
Table Op Msg_type Msg_text
test.t1 check status OK
DROP TABLE t1;
CREATE TABLE t1 (i INT, j INT, k INT, PRIMARY KEY (i), KEY(j)) ENGINE = ROCKSDB PARTITION BY KEY(i) PARTITIONS 4;
# crash_during_index_creation_partition
flush logs;
SET SESSION debug="+d,crash_during_index_creation_partition";
ALTER TABLE t1 ADD INDEX kij(i,j), ALGORITHM=INPLACE;
ERROR HY000: Lost connection to MySQL server during query
SET SESSION debug="-d,crash_during_index_creation_partition";
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
`i` int(11) NOT NULL DEFAULT '0',
`j` int(11) DEFAULT NULL,
`k` int(11) DEFAULT NULL,
PRIMARY KEY (`i`),
KEY `j` (`j`)
) ENGINE=ROCKSDB DEFAULT CHARSET=latin1
/*!50100 PARTITION BY KEY (i)
PARTITIONS 4 */
ALTER TABLE t1 ADD INDEX kij(i,j), ALGORITHM=INPLACE;
SELECT * FROM t1 ORDER BY i LIMIT 10;
i j k
1 1 1
2 2 2
3 3 3
4 4 4
5 5 5
6 6 6
7 7 7
8 8 8
9 9 9
10 10 10
SELECT COUNT(*) FROM t1;
COUNT(*)
100
DROP TABLE t1;
CREATE TABLE t1 (i INT, j INT, k INT, PRIMARY KEY (i), KEY(j)) ENGINE = ROCKSDB PARTITION BY KEY(i) PARTITIONS 4;
# crash_during_index_creation_partition
flush logs;
SET SESSION debug="+d,myrocks_simulate_index_create_rollback";
# expected assertion failure from sql layer here for alter rollback
call mtr.add_suppression("Assertion `0' failed.");
call mtr.add_suppression("Attempting backtrace. You can use the following information to find out");
ALTER TABLE t1 ADD INDEX kij(i,j), ALGORITHM=INPLACE;
ERROR HY000: Lost connection to MySQL server during query
SET SESSION debug="-d,myrocks_simulate_index_create_rollback";
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
`i` int(11) NOT NULL DEFAULT '0',
`j` int(11) DEFAULT NULL,
`k` int(11) DEFAULT NULL,
PRIMARY KEY (`i`),
KEY `j` (`j`)
) ENGINE=ROCKSDB DEFAULT CHARSET=latin1
/*!50100 PARTITION BY KEY (i)
PARTITIONS 4 */
ALTER TABLE t1 ADD INDEX kij(i,j), ALGORITHM=INPLACE;
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
`i` int(11) NOT NULL DEFAULT '0',
`j` int(11) DEFAULT NULL,
`k` int(11) DEFAULT NULL,
PRIMARY KEY (`i`),
KEY `j` (`j`),
KEY `kij` (`i`,`j`)
) ENGINE=ROCKSDB DEFAULT CHARSET=latin1
/*!50100 PARTITION BY KEY (i)
PARTITIONS 4 */
SELECT COUNT(*) FROM t1;
COUNT(*)
100
DROP TABLE t1;

View File

@@ -1 +0,0 @@
RocksDB: Can't disable allow_os_buffer if allow_mmap_reads is enabled

View File

@@ -0,0 +1,24 @@
#---------------------------
# two threads inserting simultaneously with increment > 1
# Issue #390
#---------------------------
CREATE TABLE t1 (a INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb;
SET auto_increment_increment = 2;
SET auto_increment_offset = 1;
INSERT INTO t1 VALUES(NULL);
SET auto_increment_increment = 2;
SET auto_increment_offset = 1;
SET debug_sync='rocksdb.autoinc_vars SIGNAL parked1 WAIT_FOR go NO_CLEAR_EVENT';
INSERT INTO t1 VALUES(NULL);
SET debug_sync='rocksdb.autoinc_vars SIGNAL parked2 WAIT_FOR go NO_CLEAR_EVENT';
INSERT INTO t1 VALUES(NULL);
SET debug_sync='now WAIT_FOR parked1';
SET debug_sync='now WAIT_FOR parked2';
SET debug_sync='now SIGNAL go';
SET debug_sync='RESET';
SELECT * FROM t1;
a
1
3
5
DROP TABLE t1;

View File

@@ -0,0 +1,96 @@
#---------------------------
# ten threads inserting simultaneously with increment > 1
# Issue #390
#---------------------------
CREATE TABLE t1 (pk INT AUTO_INCREMENT PRIMARY KEY, thr INT) ENGINE=rocksdb;
connect con$i, localhost, root,,;
SET auto_increment_increment = 100;
SET auto_increment_offset = 9 + 1;
connect con$i, localhost, root,,;
SET auto_increment_increment = 100;
SET auto_increment_offset = 8 + 1;
connect con$i, localhost, root,,;
SET auto_increment_increment = 100;
SET auto_increment_offset = 7 + 1;
connect con$i, localhost, root,,;
SET auto_increment_increment = 100;
SET auto_increment_offset = 6 + 1;
connect con$i, localhost, root,,;
SET auto_increment_increment = 100;
SET auto_increment_offset = 5 + 1;
connect con$i, localhost, root,,;
SET auto_increment_increment = 100;
SET auto_increment_offset = 4 + 1;
connect con$i, localhost, root,,;
SET auto_increment_increment = 100;
SET auto_increment_offset = 3 + 1;
connect con$i, localhost, root,,;
SET auto_increment_increment = 100;
SET auto_increment_offset = 2 + 1;
connect con$i, localhost, root,,;
SET auto_increment_increment = 100;
SET auto_increment_offset = 1 + 1;
connect con$i, localhost, root,,;
SET auto_increment_increment = 100;
SET auto_increment_offset = 0 + 1;
connection default;
connection con9;
LOAD DATA INFILE <input_file> INTO TABLE t1;
connection con8;
LOAD DATA INFILE <input_file> INTO TABLE t1;
connection con7;
LOAD DATA INFILE <input_file> INTO TABLE t1;
connection con6;
LOAD DATA INFILE <input_file> INTO TABLE t1;
connection con5;
LOAD DATA INFILE <input_file> INTO TABLE t1;
connection con4;
LOAD DATA INFILE <input_file> INTO TABLE t1;
connection con3;
LOAD DATA INFILE <input_file> INTO TABLE t1;
connection con2;
LOAD DATA INFILE <input_file> INTO TABLE t1;
connection con1;
LOAD DATA INFILE <input_file> INTO TABLE t1;
connection con0;
LOAD DATA INFILE <input_file> INTO TABLE t1;
connection default;
connection con9;
connection con8;
connection con7;
connection con6;
connection con5;
connection con4;
connection con3;
connection con2;
connection con1;
connection con0;
connection default;
SELECT COUNT(*) FROM t1;
COUNT(*)
1000000
SELECT thr, COUNT(pk) FROM t1 GROUP BY thr;
thr COUNT(pk)
0 100000
1 100000
2 100000
3 100000
4 100000
5 100000
6 100000
7 100000
8 100000
9 100000
disconnect con9;
disconnect con8;
disconnect con7;
disconnect con6;
disconnect con5;
disconnect con4;
disconnect con3;
disconnect con2;
disconnect con1;
disconnect con0;
SELECT * FROM t1 ORDER BY pk INTO OUTFILE <output_file>;
All pk values matched their expected values
DROP TABLE t1;

View File

@@ -17,6 +17,21 @@ LOAD DATA INFILE <input_file> INTO TABLE t1;
LOAD DATA INFILE <input_file> INTO TABLE t2; LOAD DATA INFILE <input_file> INTO TABLE t2;
LOAD DATA INFILE <input_file> INTO TABLE t3; LOAD DATA INFILE <input_file> INTO TABLE t3;
set rocksdb_bulk_load=0; set rocksdb_bulk_load=0;
SHOW TABLE STATUS WHERE name LIKE 't%';
Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment
t1 ROCKSDB 10 Fixed 10000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL
t2 ROCKSDB 10 Fixed 10000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL
t3 ROCKSDB 10 Fixed 10000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL partitioned
ANALYZE TABLE t1, t2, t3;
Table Op Msg_type Msg_text
test.t1 analyze status OK
test.t2 analyze status OK
test.t3 analyze status OK
SHOW TABLE STATUS WHERE name LIKE 't%';
Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment
t1 ROCKSDB 10 Fixed 10000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL
t2 ROCKSDB 10 Fixed 10000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL
t3 ROCKSDB 10 Fixed 10000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL partitioned
select count(pk) from t1; select count(pk) from t1;
count(pk) count(pk)
10000000 10000000

View File

@@ -0,0 +1,14 @@
DROP TABLE IF EXISTS a;
create table a (id int, value int, primary key (id) comment 'cf_a') engine=rocksdb;
set rocksdb_bulk_load=1;
set rocksdb_commit_in_the_middle=1;
alter table a add index v (value) COMMENT 'cf_a';
set rocksdb_bulk_load=0;
set rocksdb_commit_in_the_middle=0;
select count(*) from a force index(primary);
count(*)
100000
select count(*) from a force index(v);
count(*)
100000
DROP TABLE a;

View File

@@ -16,13 +16,13 @@ pk col1
1 1 1 1
2 2 2 2
3 3 3 3
set @tmp1=@@rocksdb_verify_checksums; set @tmp1=@@rocksdb_verify_row_debug_checksums;
set rocksdb_verify_checksums=1; set rocksdb_verify_row_debug_checksums=1;
set session debug_dbug= "+d,myrocks_simulate_bad_row_read1"; set session debug_dbug= "+d,myrocks_simulate_bad_row_read1";
select * from t1 where pk=1; select * from t1 where pk=1;
ERROR HY000: Got error 122 "Internal (unspecified) error in handler" from storage engine ROCKSDB ERROR HY000: Got error 122 "Internal (unspecified) error in handler" from storage engine ROCKSDB
set session debug_dbug= "-d,myrocks_simulate_bad_row_read1"; set session debug_dbug= "-d,myrocks_simulate_bad_row_read1";
set rocksdb_verify_checksums=@tmp1; set rocksdb_verify_row_debug_checksums=@tmp1;
select * from t1 where pk=1; select * from t1 where pk=1;
pk col1 pk col1
1 1 1 1

View File

@@ -40,3 +40,23 @@ t1 0 PRIMARY 1 pk A # NULL NULL LSMTREE
t1 1 a 1 a A # NULL NULL YES LSMTREE simple index on a t1 1 a 1 a A # NULL NULL YES LSMTREE simple index on a
ALTER TABLE t1 DROP KEY a; ALTER TABLE t1 DROP KEY a;
DROP TABLE t1; DROP TABLE t1;
#
# Issue #376: MyRocks: ORDER BY optimizer is unable to use the index extension
#
create table t0 (a int);
insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
create table t1(a int);
insert into t1 select A.a + B.a* 10 + C.a * 100 from t0 A, t0 B, t0 C;
create table t2 (
pk int not null,
a int not null,
b int not null,
primary key(pk),
key(a)
) engine=rocksdb;
insert into t2 select A.a, FLOOR(A.a/10), A.a from t1 A;
# This must have type=range, index=a, and must not have 'Using filesort':
explain select * from t2 force index (a) where a=0 and pk>=3 order by pk;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 range a a 8 NULL # Using index condition
drop table t0,t1,t2;

View File

@@ -0,0 +1,31 @@
DROP TABLE IF EXISTS t1;
DROP TABLE IF EXISTS t2;
create table t1 (a int, primary key(a) comment 'lock_into_cf1') engine=rocksdb;
insert into t1 values (1);
insert into t1 values (2);
create table t2 (a int, primary key(a) comment 'lock_info_cf2') engine=rocksdb;
insert into t2 values (1);
insert into t2 values (2);
set autocommit=0;
select * from t1 for update;
a
1
2
select * from t2 for update;
a
1
2
use information_schema;
select rocksdb_ddl.cf, rocksdb_locks.transaction_id, rocksdb_locks.key
from rocksdb_locks
left join rocksdb_ddl
on rocksdb_locks.column_family_id=rocksdb_ddl.column_family
order by rocksdb_ddl.cf;
cf transaction_id key
lock_info_cf2 _txn_id_ _key_
lock_info_cf2 _txn_id_ _key_
lock_into_cf1 _txn_id_ _key_
lock_into_cf1 _txn_id_ _key_
use test;
DROP TABLE t1;
DROP TABLE t2;

View File

@@ -310,7 +310,7 @@ connection con2;
SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ; SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
UPDATE t0 SET VALUE=VALUE+1 WHERE id=190000; UPDATE t0 SET VALUE=VALUE+1 WHERE id=190000;
connection con1; connection con1;
ERROR 40001: Deadlock found when trying to get lock; try restarting transaction ERROR: 1213
connection default; connection default;
disconnect con1; disconnect con1;
disconnect con2; disconnect con2;
@@ -333,7 +333,9 @@ connection con2;
SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED; SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
UPDATE t0 SET VALUE=VALUE+1 WHERE id=190000; UPDATE t0 SET VALUE=VALUE+1 WHERE id=190000;
connection con1; connection con1;
ERROR 40001: Deadlock found when trying to get lock; try restarting transaction id value
190000 1
ERROR: 0
connection default; connection default;
disconnect con1; disconnect con1;
disconnect con2; disconnect con2;
@@ -406,7 +408,7 @@ BEGIN;
DELETE FROM t0 WHERE id=190000; DELETE FROM t0 WHERE id=190000;
COMMIT; COMMIT;
connection con1; connection con1;
ERROR 40001: Deadlock found when trying to get lock; try restarting transaction ERROR: 1213
COMMIT; COMMIT;
connection default; connection default;
disconnect con1; disconnect con1;
@@ -434,7 +436,8 @@ BEGIN;
DELETE FROM t0 WHERE id=190000; DELETE FROM t0 WHERE id=190000;
COMMIT; COMMIT;
connection con1; connection con1;
ERROR 40001: Deadlock found when trying to get lock; try restarting transaction id value
ERROR: 0
COMMIT; COMMIT;
connection default; connection default;
disconnect con1; disconnect con1;
@@ -462,7 +465,7 @@ BEGIN;
UPDATE t0 SET id=200001 WHERE id=190000; UPDATE t0 SET id=200001 WHERE id=190000;
COMMIT; COMMIT;
connection con1; connection con1;
ERROR 40001: Deadlock found when trying to get lock; try restarting transaction ERROR: 1213
COMMIT; COMMIT;
connection default; connection default;
disconnect con1; disconnect con1;
@@ -490,7 +493,8 @@ BEGIN;
UPDATE t0 SET id=200001 WHERE id=190000; UPDATE t0 SET id=200001 WHERE id=190000;
COMMIT; COMMIT;
connection con1; connection con1;
ERROR 40001: Deadlock found when trying to get lock; try restarting transaction id value
ERROR: 0
COMMIT; COMMIT;
connection default; connection default;
disconnect con1; disconnect con1;

View File

@@ -0,0 +1,281 @@
set optimizer_switch='index_merge_sort_union=off';
create table t (a int, b int, c int, d int, e int, primary key(a, b, c, d), key(b, d)) engine=rocksdb;
analyze table t;
Table Op Msg_type Msg_text
test.t analyze status OK
show indexes from t;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
t 0 PRIMARY 1 a A 100 NULL NULL LSMTREE
t 0 PRIMARY 2 b A 500 NULL NULL LSMTREE
t 0 PRIMARY 3 c A 2500 NULL NULL LSMTREE
t 0 PRIMARY 4 d A 2500 NULL NULL LSMTREE
t 1 b 1 b A 50 NULL NULL LSMTREE
t 1 b 2 d A 500 NULL NULL LSMTREE
set optimizer_switch = 'skip_scan=off';
explain select b, d from t where d < 2;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t index NULL b 8 NULL # Using where; Using index
rows_read
2500
set optimizer_switch = 'skip_scan=on,skip_scan_cost_based=off';
explain select b, d from t where d < 2;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t range PRIMARY,b b 8 NULL # Using where; Using index for skip scan
rows_read
260
include/diff_tables.inc [temp_orig, temp_skip]
set optimizer_switch = 'skip_scan=off,skip_scan_cost_based=on';
set optimizer_switch = 'skip_scan=off';
explain select b, d from t where d > 4;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t index NULL b 8 NULL # Using where; Using index
rows_read
2500
set optimizer_switch = 'skip_scan=on,skip_scan_cost_based=off';
explain select b, d from t where d > 4;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t range PRIMARY,b b 8 NULL # Using where; Using index for skip scan
rows_read
1509
include/diff_tables.inc [temp_orig, temp_skip]
set optimizer_switch = 'skip_scan=off,skip_scan_cost_based=on';
set optimizer_switch = 'skip_scan=off';
explain select a, b, c, d from t where a = 5 and d <= 3;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t ref PRIMARY PRIMARY 4 const # Using where; Using index
rows_read
251
set optimizer_switch = 'skip_scan=on,skip_scan_cost_based=off';
explain select a, b, c, d from t where a = 5 and d <= 3;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t range PRIMARY,b PRIMARY 16 NULL # Using where; Using index for skip scan
rows_read
126
include/diff_tables.inc [temp_orig, temp_skip]
set optimizer_switch = 'skip_scan=off,skip_scan_cost_based=on';
set optimizer_switch = 'skip_scan=off';
explain select e from t where a = 5 and d <= 3;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t ref PRIMARY PRIMARY 4 const # Using where
rows_read
251
set optimizer_switch = 'skip_scan=on,skip_scan_cost_based=off';
explain select e from t where a = 5 and d <= 3;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t ref PRIMARY,b PRIMARY 4 const # Using where
rows_read
251
include/diff_tables.inc [temp_orig, temp_skip]
set optimizer_switch = 'skip_scan=off,skip_scan_cost_based=on';
set optimizer_switch = 'skip_scan=off';
explain select a, b, c, d from t where a = 5 and d >= 98;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t ref PRIMARY PRIMARY 4 const # Using where; Using index
rows_read
251
set optimizer_switch = 'skip_scan=on,skip_scan_cost_based=off';
explain select a, b, c, d from t where a = 5 and d >= 98;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t range PRIMARY,b PRIMARY 16 NULL # Using where; Using index for skip scan
rows_read
51
include/diff_tables.inc [temp_orig, temp_skip]
set optimizer_switch = 'skip_scan=off,skip_scan_cost_based=on';
set optimizer_switch = 'skip_scan=off';
explain select e from t where a = 5 and d >= 98;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t ref PRIMARY PRIMARY 4 const # Using where
rows_read
251
set optimizer_switch = 'skip_scan=on,skip_scan_cost_based=off';
explain select e from t where a = 5 and d >= 98;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t ref PRIMARY,b PRIMARY 4 const # Using where
rows_read
251
include/diff_tables.inc [temp_orig, temp_skip]
set optimizer_switch = 'skip_scan=off,skip_scan_cost_based=on';
set optimizer_switch = 'skip_scan=off';
explain select a, b, c, d from t where a in (1, 5) and d >= 98;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t range PRIMARY PRIMARY 4 NULL # Using where; Using index
rows_read
502
set optimizer_switch = 'skip_scan=on,skip_scan_cost_based=off';
explain select a, b, c, d from t where a in (1, 5) and d >= 98;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t range PRIMARY,b PRIMARY 16 NULL # Using where; Using index for skip scan
rows_read
102
include/diff_tables.inc [temp_orig, temp_skip]
set optimizer_switch = 'skip_scan=off,skip_scan_cost_based=on';
set optimizer_switch = 'skip_scan=off';
explain select a, b, c, d from t where a in (1, 3, 5) and d >= 98;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t range PRIMARY PRIMARY 4 NULL # Using where; Using index
rows_read
753
set optimizer_switch = 'skip_scan=on,skip_scan_cost_based=off';
explain select a, b, c, d from t where a in (1, 3, 5) and d >= 98;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t range PRIMARY,b PRIMARY 16 NULL # Using where; Using index for skip scan
rows_read
153
include/diff_tables.inc [temp_orig, temp_skip]
set optimizer_switch = 'skip_scan=off,skip_scan_cost_based=on';
set optimizer_switch = 'skip_scan=off';
explain select a, b, c, d from t where a in (1, 5) and b in (1, 2) and d >= 98;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t range PRIMARY,b PRIMARY 8 NULL # Using where; Using index
rows_read
204
set optimizer_switch = 'skip_scan=on,skip_scan_cost_based=off';
explain select a, b, c, d from t where a in (1, 5) and b in (1, 2) and d >= 98;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t range PRIMARY,b PRIMARY 16 NULL # Using where; Using index for skip scan
rows_read
44
include/diff_tables.inc [temp_orig, temp_skip]
set optimizer_switch = 'skip_scan=off,skip_scan_cost_based=on';
set optimizer_switch = 'skip_scan=off';
explain select a, b, c, d from t where a in (1, 2, 3, 4, 5) and b in (1, 2, 3) and d >= 98;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t range PRIMARY,b PRIMARY 8 NULL # Using where; Using index
rows_read
765
set optimizer_switch = 'skip_scan=on,skip_scan_cost_based=off';
explain select a, b, c, d from t where a in (1, 2, 3, 4, 5) and b in (1, 2, 3) and d >= 98;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t range PRIMARY,b PRIMARY 16 NULL # Using where; Using index for skip scan
rows_read
165
include/diff_tables.inc [temp_orig, temp_skip]
set optimizer_switch = 'skip_scan=off,skip_scan_cost_based=on';
set optimizer_switch = 'skip_scan=off';
explain select a, b, c, d from t where a = 5 and b = 2 and d >= 98;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t ref PRIMARY,b PRIMARY 8 const,const # Using where; Using index
rows_read
51
set optimizer_switch = 'skip_scan=on,skip_scan_cost_based=off';
explain select a, b, c, d from t where a = 5 and b = 2 and d >= 98;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t range PRIMARY,b PRIMARY 16 NULL # Using where; Using index for skip scan
rows_read
11
include/diff_tables.inc [temp_orig, temp_skip]
set optimizer_switch = 'skip_scan=off,skip_scan_cost_based=on';
set optimizer_switch = 'skip_scan=off';
explain select a+1, b, c, d from t where a = 5 and d < 3;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t ref PRIMARY PRIMARY 4 const # Using where; Using index
rows_read
251
set optimizer_switch = 'skip_scan=on,skip_scan_cost_based=off';
explain select a+1, b, c, d from t where a = 5 and d < 3;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t range PRIMARY,b PRIMARY 16 NULL # Using where; Using index for skip scan
rows_read
101
include/diff_tables.inc [temp_orig, temp_skip]
set optimizer_switch = 'skip_scan=off,skip_scan_cost_based=on';
set optimizer_switch = 'skip_scan=off';
explain select b, c, d from t where a = 5 and d < 3;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t ref PRIMARY PRIMARY 4 const # Using where; Using index
rows_read
251
set optimizer_switch = 'skip_scan=on,skip_scan_cost_based=off';
explain select b, c, d from t where a = 5 and d < 3;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t range PRIMARY,b PRIMARY 16 NULL # Using where; Using index for skip scan
rows_read
101
include/diff_tables.inc [temp_orig, temp_skip]
set optimizer_switch = 'skip_scan=off,skip_scan_cost_based=on';
set optimizer_switch = 'skip_scan=off';
explain select a, b, c, d from t where a = b and d >= 98;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t index NULL b 8 NULL # Using where; Using index
rows_read
2500
set optimizer_switch = 'skip_scan=on,skip_scan_cost_based=off';
explain select a, b, c, d from t where a = b and d >= 98;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t range PRIMARY,b b 8 NULL # Using where; Using index for skip scan
rows_read
9
include/diff_tables.inc [temp_orig, temp_skip]
set optimizer_switch = 'skip_scan=off,skip_scan_cost_based=on';
set optimizer_switch = 'skip_scan=on';
set optimizer_trace = 'enabled=on';
explain select a, b, c, d from t where a = 5 and d < 3 order by b, c, d;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t ref PRIMARY,b PRIMARY 4 const # Using where; Using index
select count(*) from information_schema.optimizer_trace where trace like '%order_attribute_not_prefix_in_index%';
count(*)
1
explain select a, b, c, d from t where a = 2 and d >= 98 and e = 2;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t ref PRIMARY PRIMARY 4 const # Using where
select count(*) from information_schema.optimizer_trace where trace like '%query_references_nonkey_column%';
count(*)
1
explain select a, b, c, d from t where a = 5 or b = 2 and d >= 98;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t index PRIMARY,b b 8 NULL # Using where; Using index
select count(*) from information_schema.optimizer_trace where trace like '%no_range_tree%';
count(*)
1
explain select a, b, c, d from t where a = 5 or b = 2 or d >= 98;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t index PRIMARY,b b 8 NULL # Using where; Using index
select count(*) from information_schema.optimizer_trace where trace like '%no_range_tree%';
count(*)
1
explain select a, b, c, d from t where a = 5 or d >= 98;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t index PRIMARY,b b 8 NULL # Using where; Using index
select count(*) from information_schema.optimizer_trace where trace like '%no_range_tree%';
count(*)
1
explain select a, b, c, d from t where ((a = 5 and b = 2) or a = 2) and d >= 98;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t range PRIMARY,b b 8 NULL # Using where; Using index for skip scan
select count(*) from information_schema.optimizer_trace where trace like '%keypart_in_disjunctive_query%';
count(*)
1
explain select a, b, c, d from t where a > 2 and d >= 98;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t range PRIMARY,b PRIMARY 4 NULL # Using where; Using index
select count(*) from information_schema.optimizer_trace where trace like '%prefix_not_const_equality%';
count(*)
1
explain select a, b, c, d from t where a = 2 and (d >= 98 or d < 2);
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t ref PRIMARY,b PRIMARY 4 const # Using where; Using index
select count(*) from information_schema.optimizer_trace where trace like '%range_predicate_too_complex%';
count(*)
1
explain select a, b, c, d from t where a = 2 and b = 2;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t ref PRIMARY,b PRIMARY 8 const,const # Using index
select count(*) from information_schema.optimizer_trace where trace like '%no_range_predicate%';
count(*)
1
explain select a, b, c, d from t where a = 2 and c > 2 and d < 2;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t ref PRIMARY,b PRIMARY 4 const # Using where; Using index
select count(*) from information_schema.optimizer_trace where trace like '%too_many_range_predicates%';
count(*)
1
explain select a, b, c, d from t where (a < 1 or a = 4 or a = 5) and b in (1, 2, 3) and d >= 98;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t range PRIMARY,b PRIMARY 8 NULL # Using where; Using index
select count(*) from information_schema.optimizer_trace where trace like '%prefix_not_const_equality%';
count(*)
1
set optimizer_trace = 'enabled=off';
set optimizer_switch= 'skip_scan=off';
drop table t;
set optimizer_switch='index_merge_sort_union=on';

View File

@@ -875,10 +875,9 @@ rocksdb_advise_random_on_open ON
rocksdb_allow_concurrent_memtable_write OFF rocksdb_allow_concurrent_memtable_write OFF
rocksdb_allow_mmap_reads OFF rocksdb_allow_mmap_reads OFF
rocksdb_allow_mmap_writes OFF rocksdb_allow_mmap_writes OFF
rocksdb_allow_os_buffer ON
rocksdb_background_sync OFF rocksdb_background_sync OFF
rocksdb_base_background_compactions 1 rocksdb_base_background_compactions 1
rocksdb_block_cache_size 8388608 rocksdb_block_cache_size 536870912
rocksdb_block_restart_interval 16 rocksdb_block_restart_interval 16
rocksdb_block_size 4096 rocksdb_block_size 4096
rocksdb_block_size_deviation 10 rocksdb_block_size_deviation 10
@@ -900,6 +899,7 @@ rocksdb_create_if_missing ON
rocksdb_create_missing_column_families OFF rocksdb_create_missing_column_families OFF
rocksdb_datadir ./.rocksdb rocksdb_datadir ./.rocksdb
rocksdb_db_write_buffer_size 0 rocksdb_db_write_buffer_size 0
rocksdb_deadlock_detect OFF
rocksdb_debug_optimizer_no_zero_cardinality ON rocksdb_debug_optimizer_no_zero_cardinality ON
rocksdb_default_cf_options rocksdb_default_cf_options
rocksdb_delete_obsolete_files_period_micros 21600000000 rocksdb_delete_obsolete_files_period_micros 21600000000
@@ -938,6 +938,7 @@ rocksdb_paranoid_checks ON
rocksdb_pause_background_work ON rocksdb_pause_background_work ON
rocksdb_perf_context_level 0 rocksdb_perf_context_level 0
rocksdb_pin_l0_filter_and_index_blocks_in_cache ON rocksdb_pin_l0_filter_and_index_blocks_in_cache ON
rocksdb_print_snapshot_conflict_queries OFF
rocksdb_rate_limiter_bytes_per_sec 0 rocksdb_rate_limiter_bytes_per_sec 0
rocksdb_read_free_rpl_tables rocksdb_read_free_rpl_tables
rocksdb_records_in_range 50 rocksdb_records_in_range 50
@@ -949,16 +950,20 @@ rocksdb_skip_fill_cache OFF
rocksdb_skip_unique_check OFF rocksdb_skip_unique_check OFF
rocksdb_skip_unique_check_tables .* rocksdb_skip_unique_check_tables .*
rocksdb_stats_dump_period_sec 600 rocksdb_stats_dump_period_sec 600
rocksdb_store_checksums OFF rocksdb_store_row_debug_checksums OFF
rocksdb_strict_collation_check OFF rocksdb_strict_collation_check OFF
rocksdb_strict_collation_exceptions rocksdb_strict_collation_exceptions
rocksdb_table_cache_numshardbits 6 rocksdb_table_cache_numshardbits 6
rocksdb_table_stats_sampling_pct 10 rocksdb_table_stats_sampling_pct 10
rocksdb_tmpdir
rocksdb_trace_sst_api OFF
rocksdb_unsafe_for_binlog OFF rocksdb_unsafe_for_binlog OFF
rocksdb_use_adaptive_mutex OFF rocksdb_use_adaptive_mutex OFF
rocksdb_use_direct_reads OFF
rocksdb_use_direct_writes OFF
rocksdb_use_fsync OFF rocksdb_use_fsync OFF
rocksdb_validate_tables 1 rocksdb_validate_tables 1
rocksdb_verify_checksums OFF rocksdb_verify_row_debug_checksums OFF
rocksdb_wal_bytes_per_sync 0 rocksdb_wal_bytes_per_sync 0
rocksdb_wal_dir rocksdb_wal_dir
rocksdb_wal_recovery_mode 2 rocksdb_wal_recovery_mode 2
@@ -998,7 +1003,7 @@ insert into t49 values (1,10),(2,20);
begin; begin;
update t49 set a = 100 where pk = 1; update t49 set a = 100 where pk = 1;
connect con1,localhost,root,,; connect con1,localhost,root,,;
set rocksdb_lock_wait_timeout=5000; set rocksdb_lock_wait_timeout=60;
set @var1= to_seconds(now()); set @var1= to_seconds(now());
update t49 set a = 1000 where pk = 1; update t49 set a = 1000 where pk = 1;
connect con2,localhost,root,,; connect con2,localhost,root,,;
@@ -1006,9 +1011,7 @@ kill query $con1_id;
connection con1; connection con1;
ERROR 70100: Query execution was interrupted ERROR 70100: Query execution was interrupted
set @var2= to_seconds(now()); set @var2= to_seconds(now());
"[Jay Edgar] I've updated this query to help determine why it is sometimes failing" select if ((@var2 - @var1) < 60, "passed", (@var2 - @var1)) as 'result';
"(t13541934). If you get an error here (i.e. not 'passed') notify me."
select if ((@var2 - @var1) < 1000, "passed", (@var2 - @var1)) as 'result';
result result
passed passed
connection default; connection default;
@@ -1312,7 +1315,7 @@ insert into t1 select (@a:=@a+1), 1234 from information_schema.session_variables
set @tmp1= @@rocksdb_max_row_locks; set @tmp1= @@rocksdb_max_row_locks;
set rocksdb_max_row_locks= 20; set rocksdb_max_row_locks= 20;
update t1 set a=a+10; update t1 set a=a+10;
ERROR HY000: Internal error: Operation aborted: Number of locks held by the transaction exceeded @@rocksdb_max_row_locks ERROR HY000: Got error 197 'Number of locks held reached @@rocksdb_max_row_locks.' from ROCKSDB
DROP TABLE t1; DROP TABLE t1;
# #
# Test AUTO_INCREMENT behavior problem, # Test AUTO_INCREMENT behavior problem,
@@ -1478,9 +1481,9 @@ Rocksdb_number_superversion_acquires #
Rocksdb_number_superversion_cleanups # Rocksdb_number_superversion_cleanups #
Rocksdb_number_superversion_releases # Rocksdb_number_superversion_releases #
Rocksdb_rate_limit_delay_millis # Rocksdb_rate_limit_delay_millis #
Rocksdb_sequence_number #
Rocksdb_snapshot_conflict_errors # Rocksdb_snapshot_conflict_errors #
Rocksdb_wal_bytes # Rocksdb_wal_bytes #
Rocksdb_wal_group_syncs #
Rocksdb_wal_synced # Rocksdb_wal_synced #
Rocksdb_write_other # Rocksdb_write_other #
Rocksdb_write_self # Rocksdb_write_self #
@@ -1548,9 +1551,9 @@ ROCKSDB_NUMBER_SUPERVERSION_ACQUIRES
ROCKSDB_NUMBER_SUPERVERSION_CLEANUPS ROCKSDB_NUMBER_SUPERVERSION_CLEANUPS
ROCKSDB_NUMBER_SUPERVERSION_RELEASES ROCKSDB_NUMBER_SUPERVERSION_RELEASES
ROCKSDB_RATE_LIMIT_DELAY_MILLIS ROCKSDB_RATE_LIMIT_DELAY_MILLIS
ROCKSDB_SEQUENCE_NUMBER
ROCKSDB_SNAPSHOT_CONFLICT_ERRORS ROCKSDB_SNAPSHOT_CONFLICT_ERRORS
ROCKSDB_WAL_BYTES ROCKSDB_WAL_BYTES
ROCKSDB_WAL_GROUP_SYNCS
ROCKSDB_WAL_SYNCED ROCKSDB_WAL_SYNCED
ROCKSDB_WRITE_OTHER ROCKSDB_WRITE_OTHER
ROCKSDB_WRITE_SELF ROCKSDB_WRITE_SELF
@@ -1620,9 +1623,9 @@ ROCKSDB_NUMBER_SUPERVERSION_ACQUIRES
ROCKSDB_NUMBER_SUPERVERSION_CLEANUPS ROCKSDB_NUMBER_SUPERVERSION_CLEANUPS
ROCKSDB_NUMBER_SUPERVERSION_RELEASES ROCKSDB_NUMBER_SUPERVERSION_RELEASES
ROCKSDB_RATE_LIMIT_DELAY_MILLIS ROCKSDB_RATE_LIMIT_DELAY_MILLIS
ROCKSDB_SEQUENCE_NUMBER
ROCKSDB_SNAPSHOT_CONFLICT_ERRORS ROCKSDB_SNAPSHOT_CONFLICT_ERRORS
ROCKSDB_WAL_BYTES ROCKSDB_WAL_BYTES
ROCKSDB_WAL_GROUP_SYNCS
ROCKSDB_WAL_SYNCED ROCKSDB_WAL_SYNCED
ROCKSDB_WRITE_OTHER ROCKSDB_WRITE_OTHER
ROCKSDB_WRITE_SELF ROCKSDB_WRITE_SELF
@@ -2464,4 +2467,24 @@ a
10 10
11 11
DROP TABLE t1; DROP TABLE t1;
#
# Issue #411: Setting rocksdb_commit_in_the_middle commits transaction
# without releasing iterator
#
CREATE TABLE t1 (id1 bigint(20),
id2 bigint(20),
id3 bigint(20),
PRIMARY KEY (id1, id2, id3))
DEFAULT CHARSET=latin1;
CREATE TABLE t2 (id1 bigint(20),
id2 bigint(20),
PRIMARY KEY (id1, id2))
DEFAULT CHARSET=latin1;
set rocksdb_commit_in_the_middle=1;
SET @save_rocksdb_bulk_load_size= @@rocksdb_bulk_load_size;
set rocksdb_bulk_load_size = 100;
DELETE t2, t1 FROM t2 LEFT JOIN t1 ON t2.id2 = t1.id2 AND t2.id1 = t1.id1 WHERE t2.id1 = 0;
SET rocksdb_bulk_load_size= @save_rocksdb_bulk_load_size;
SET rocksdb_commit_in_the_middle=0;
DROP TABLE t1, t2;
SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = @ORIG_PAUSE_BACKGROUND_WORK; SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = @ORIG_PAUSE_BACKGROUND_WORK;

View File

@@ -17,19 +17,19 @@ where option_type in ('WRITE_BUFFER_SIZE',
'MAX_BYTES_FOR_LEVEL_MULTIPLIER') 'MAX_BYTES_FOR_LEVEL_MULTIPLIER')
order by cf_name, option_type; order by cf_name, option_type;
cf_name option_type value cf_name option_type value
cf1 MAX_BYTES_FOR_LEVEL_MULTIPLIER 10 cf1 MAX_BYTES_FOR_LEVEL_MULTIPLIER 10.000000
cf1 TARGET_FILE_SIZE_BASE 1048576 cf1 TARGET_FILE_SIZE_BASE 1048576
cf1 WRITE_BUFFER_SIZE 12582912 cf1 WRITE_BUFFER_SIZE 12582912
cf2 MAX_BYTES_FOR_LEVEL_MULTIPLIER 10 cf2 MAX_BYTES_FOR_LEVEL_MULTIPLIER 10.000000
cf2 TARGET_FILE_SIZE_BASE 1048576 cf2 TARGET_FILE_SIZE_BASE 1048576
cf2 WRITE_BUFFER_SIZE 12582912 cf2 WRITE_BUFFER_SIZE 12582912
default MAX_BYTES_FOR_LEVEL_MULTIPLIER 10 default MAX_BYTES_FOR_LEVEL_MULTIPLIER 10.000000
default TARGET_FILE_SIZE_BASE 1048576 default TARGET_FILE_SIZE_BASE 1048576
default WRITE_BUFFER_SIZE 12582912 default WRITE_BUFFER_SIZE 12582912
z MAX_BYTES_FOR_LEVEL_MULTIPLIER 10 z MAX_BYTES_FOR_LEVEL_MULTIPLIER 10.000000
z TARGET_FILE_SIZE_BASE 1048576 z TARGET_FILE_SIZE_BASE 1048576
z WRITE_BUFFER_SIZE 12582912 z WRITE_BUFFER_SIZE 12582912
__system__ MAX_BYTES_FOR_LEVEL_MULTIPLIER 10 __system__ MAX_BYTES_FOR_LEVEL_MULTIPLIER 10.000000
__system__ TARGET_FILE_SIZE_BASE 1048576 __system__ TARGET_FILE_SIZE_BASE 1048576
__system__ WRITE_BUFFER_SIZE 12582912 __system__ WRITE_BUFFER_SIZE 12582912
@@ -42,19 +42,19 @@ where option_type in ('WRITE_BUFFER_SIZE',
'MAX_BYTES_FOR_LEVEL_MULTIPLIER') 'MAX_BYTES_FOR_LEVEL_MULTIPLIER')
order by cf_name, option_type; order by cf_name, option_type;
cf_name option_type value cf_name option_type value
cf1 MAX_BYTES_FOR_LEVEL_MULTIPLIER 10 cf1 MAX_BYTES_FOR_LEVEL_MULTIPLIER 10.000000
cf1 TARGET_FILE_SIZE_BASE 2097152 cf1 TARGET_FILE_SIZE_BASE 2097152
cf1 WRITE_BUFFER_SIZE 8388608 cf1 WRITE_BUFFER_SIZE 8388608
cf2 MAX_BYTES_FOR_LEVEL_MULTIPLIER 8 cf2 MAX_BYTES_FOR_LEVEL_MULTIPLIER 8.000000
cf2 TARGET_FILE_SIZE_BASE 1048576 cf2 TARGET_FILE_SIZE_BASE 1048576
cf2 WRITE_BUFFER_SIZE 16777216 cf2 WRITE_BUFFER_SIZE 16777216
default MAX_BYTES_FOR_LEVEL_MULTIPLIER 10 default MAX_BYTES_FOR_LEVEL_MULTIPLIER 10.000000
default TARGET_FILE_SIZE_BASE 1048576 default TARGET_FILE_SIZE_BASE 1048576
default WRITE_BUFFER_SIZE 12582912 default WRITE_BUFFER_SIZE 12582912
z MAX_BYTES_FOR_LEVEL_MULTIPLIER 10 z MAX_BYTES_FOR_LEVEL_MULTIPLIER 10.000000
z TARGET_FILE_SIZE_BASE 4194304 z TARGET_FILE_SIZE_BASE 4194304
z WRITE_BUFFER_SIZE 12582912 z WRITE_BUFFER_SIZE 12582912
__system__ MAX_BYTES_FOR_LEVEL_MULTIPLIER 10 __system__ MAX_BYTES_FOR_LEVEL_MULTIPLIER 10.000000
__system__ TARGET_FILE_SIZE_BASE 1048576 __system__ TARGET_FILE_SIZE_BASE 1048576
__system__ WRITE_BUFFER_SIZE 12582912 __system__ WRITE_BUFFER_SIZE 12582912

View File

@@ -1,12 +1,12 @@
set @save_rocksdb_store_checksums=@@global.rocksdb_store_checksums; set @save_rocksdb_store_row_debug_checksums=@@global.rocksdb_store_row_debug_checksums;
set @save_rocksdb_verify_checksums=@@global.rocksdb_verify_checksums; set @save_rocksdb_verify_row_debug_checksums=@@global.rocksdb_verify_row_debug_checksums;
set @save_rocksdb_checksums_pct=@@global.rocksdb_checksums_pct; set @save_rocksdb_checksums_pct=@@global.rocksdb_checksums_pct;
drop table if exists t1,t2,t3; drop table if exists t1,t2,t3;
show variables like 'rocksdb_%checksum%'; show variables like 'rocksdb_%checksum%';
Variable_name Value Variable_name Value
rocksdb_checksums_pct 100 rocksdb_checksums_pct 100
rocksdb_store_checksums OFF rocksdb_store_row_debug_checksums OFF
rocksdb_verify_checksums OFF rocksdb_verify_row_debug_checksums OFF
create table t1 (pk int primary key, a int, b int, key(a), key(b)) engine=rocksdb; create table t1 (pk int primary key, a int, b int, key(a), key(b)) engine=rocksdb;
insert into t1 values (1,1,1),(2,2,2),(3,3,3); insert into t1 values (1,1,1),(2,2,2),(3,3,3);
check table t1; check table t1;
@@ -19,7 +19,7 @@ test.t1 check status OK
CHECKTABLE t1: ... 3 index entries checked (0 had checksums) CHECKTABLE t1: ... 3 index entries checked (0 had checksums)
CHECKTABLE t1: 0 table records had checksums CHECKTABLE t1: 0 table records had checksums
drop table t1; drop table t1;
set session rocksdb_store_checksums=on; set session rocksdb_store_row_debug_checksums=on;
create table t2 (pk int primary key, a int, b int, key(a), key(b)) engine=rocksdb; create table t2 (pk int primary key, a int, b int, key(a), key(b)) engine=rocksdb;
insert into t2 values (1,1,1),(2,2,2),(3,3,3); insert into t2 values (1,1,1),(2,2,2),(3,3,3);
check table t2; check table t2;
@@ -34,9 +34,9 @@ test.t2 check status OK
# Now, make a table that has both rows with checksums and without # Now, make a table that has both rows with checksums and without
create table t3 (pk int primary key, a int, b int, key(a), key(b)) engine=rocksdb; create table t3 (pk int primary key, a int, b int, key(a), key(b)) engine=rocksdb;
insert into t3 values (1,1,1),(2,2,2),(3,3,3); insert into t3 values (1,1,1),(2,2,2),(3,3,3);
set session rocksdb_store_checksums=off; set session rocksdb_store_row_debug_checksums=off;
update t3 set b=3 where a=2; update t3 set b=3 where a=2;
set session rocksdb_store_checksums=on; set session rocksdb_store_row_debug_checksums=on;
check table t3; check table t3;
Table Op Msg_type Msg_text Table Op Msg_type Msg_text
test.t3 check status OK test.t3 check status OK
@@ -46,7 +46,7 @@ test.t3 check status OK
CHECKTABLE t3: Checking index b CHECKTABLE t3: Checking index b
CHECKTABLE t3: ... 3 index entries checked (2 had checksums) CHECKTABLE t3: ... 3 index entries checked (2 had checksums)
CHECKTABLE t3: 2 table records had checksums CHECKTABLE t3: 2 table records had checksums
set session rocksdb_store_checksums=on; set session rocksdb_store_row_debug_checksums=on;
set session rocksdb_checksums_pct=5; set session rocksdb_checksums_pct=5;
create table t4 (pk int primary key, a int, b int, key(a), key(b)) engine=rocksdb; create table t4 (pk int primary key, a int, b int, key(a), key(b)) engine=rocksdb;
check table t4; check table t4;
@@ -65,13 +65,13 @@ insert into mtr.test_suppressions values
('Data with incorrect checksum'); ('Data with incorrect checksum');
# 1. Start with mismatch in key checksum of the PK. # 1. Start with mismatch in key checksum of the PK.
set session debug_dbug= "+d,myrocks_simulate_bad_pk_checksum1"; set session debug_dbug= "+d,myrocks_simulate_bad_pk_checksum1";
set session rocksdb_verify_checksums=off; set session rocksdb_verify_row_debug_checksums=off;
select * from t3; select * from t3;
pk a b pk a b
1 1 1 1 1 1
2 2 3 2 2 3
3 3 3 3 3 3
set session rocksdb_verify_checksums=on; set session rocksdb_verify_row_debug_checksums=on;
select * from t3; select * from t3;
ERROR HY000: Internal error: Record checksum mismatch ERROR HY000: Internal error: Record checksum mismatch
select * from t4; select * from t4;
@@ -79,13 +79,13 @@ ERROR HY000: Internal error: Record checksum mismatch
set session debug_dbug= "-d,myrocks_simulate_bad_pk_checksum1"; set session debug_dbug= "-d,myrocks_simulate_bad_pk_checksum1";
# 2. Continue with mismatch in pk value checksum. # 2. Continue with mismatch in pk value checksum.
set session debug_dbug= "+d,myrocks_simulate_bad_pk_checksum2"; set session debug_dbug= "+d,myrocks_simulate_bad_pk_checksum2";
set session rocksdb_verify_checksums=off; set session rocksdb_verify_row_debug_checksums=off;
select * from t3; select * from t3;
pk a b pk a b
1 1 1 1 1 1
2 2 3 2 2 3
3 3 3 3 3 3
set session rocksdb_verify_checksums=on; set session rocksdb_verify_row_debug_checksums=on;
select * from t3; select * from t3;
ERROR HY000: Internal error: Record checksum mismatch ERROR HY000: Internal error: Record checksum mismatch
select * from t4; select * from t4;
@@ -123,7 +123,7 @@ ERROR HY000: Internal error: Record checksum mismatch
select a from t4 force index(a) where a<1000000; select a from t4 force index(a) where a<1000000;
ERROR HY000: Internal error: Record checksum mismatch ERROR HY000: Internal error: Record checksum mismatch
set session debug_dbug= "-d,myrocks_simulate_bad_key_checksum1"; set session debug_dbug= "-d,myrocks_simulate_bad_key_checksum1";
set @@global.rocksdb_store_checksums=@save_rocksdb_store_checksums; set @@global.rocksdb_store_row_debug_checksums=@save_rocksdb_store_row_debug_checksums;
set @@global.rocksdb_verify_checksums=@save_rocksdb_verify_checksums; set @@global.rocksdb_verify_row_debug_checksums=@save_rocksdb_verify_row_debug_checksums;
set @@global.rocksdb_checksums_pct=@save_rocksdb_checksums_pct; set @@global.rocksdb_checksums_pct=@save_rocksdb_checksums_pct;
drop table t2,t3,t4; drop table t2,t3,t4;

View File

@@ -0,0 +1,70 @@
set @prior_rocksdb_lock_wait_timeout = @@rocksdb_lock_wait_timeout;
set @prior_rocksdb_deadlock_detect = @@rocksdb_deadlock_detect;
set global rocksdb_lock_wait_timeout = 100000;
set global rocksdb_deadlock_detect = ON;
create table t (i int primary key);
create table r1 (id int primary key, value int);
insert into r1 values (1,1),(2,2),(3,3),(4,4),(5,5),(6,6),(7,7),(8,8),(9,9),(10,10);
create table r2 like r1;
insert into r2 select * from r1;
connect con1,localhost,root,,;
begin;
update r2 set value=100 where id=9;
connect con2,localhost,root,,;
begin;
update r1 set value=100 where id=8;
select * from r2 for update;;
connection con1;
select * from r1 for update;
ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
rollback;
connection con2;
id value
1 1
2 2
3 3
4 4
5 5
6 6
7 7
8 8
9 9
10 10
rollback;
connection con1;
begin;
insert into t values (1);
connection con2;
begin;
insert into t values (2);
connect con3,localhost,root,,;
begin;
insert into t values (3);
connection con1;
select * from t where i = 2 for update;
connection con2;
select * from t where i = 3 for update;
connection con3;
select * from t;
i
3
insert into t values (4), (1);
ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
# Statement should be rolled back
select * from t;
i
3
rollback;
connection con2;
i
rollback;
connection con1;
i
rollback;
connection default;
disconnect con1;
disconnect con2;
disconnect con3;
set global rocksdb_lock_wait_timeout = @prior_rocksdb_lock_wait_timeout;
set global rocksdb_deadlock_detect = @prior_rocksdb_deadlock_detect;
drop table t,r1,r2;

View File

@@ -0,0 +1,70 @@
set @prior_rocksdb_lock_wait_timeout = @@rocksdb_lock_wait_timeout;
set @prior_rocksdb_deadlock_detect = @@rocksdb_deadlock_detect;
set global rocksdb_lock_wait_timeout = 100000;
set global rocksdb_deadlock_detect = ON;
create table t (i int primary key);
create table r1 (id int primary key, value int);
insert into r1 values (1,1),(2,2),(3,3),(4,4),(5,5),(6,6),(7,7),(8,8),(9,9),(10,10);
create table r2 like r1;
insert into r2 select * from r1;
connect con1,localhost,root,,;
begin;
update r2 set value=100 where id=9;
connect con2,localhost,root,,;
begin;
update r1 set value=100 where id=8;
select * from r2 for update;;
connection con1;
select * from r1 for update;
ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
rollback;
connection con2;
id value
1 1
2 2
3 3
4 4
5 5
6 6
7 7
8 8
9 9
10 10
rollback;
connection con1;
begin;
insert into t values (1);
connection con2;
begin;
insert into t values (2);
connect con3,localhost,root,,;
begin;
insert into t values (3);
connection con1;
select * from t where i = 2 for update;
connection con2;
select * from t where i = 3 for update;
connection con3;
select * from t;
i
3
insert into t values (4), (1);
ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
# Statement should be rolled back
select * from t;
i
3
rollback;
connection con2;
i
rollback;
connection con1;
i
rollback;
connection default;
disconnect con1;
disconnect con2;
disconnect con3;
set global rocksdb_lock_wait_timeout = @prior_rocksdb_lock_wait_timeout;
set global rocksdb_deadlock_detect = @prior_rocksdb_deadlock_detect;
drop table t,r1,r2;

View File

@@ -0,0 +1,8 @@
create table t1 (a int primary key, b int) engine=rocksdb;
set @prior_rocksdb_lock_wait_timeout = @@rocksdb_lock_wait_timeout;
set @prior_rocksdb_deadlock_detect = @@rocksdb_deadlock_detect;
set global rocksdb_lock_wait_timeout = 100000;
set global rocksdb_deadlock_detect = ON;
set global rocksdb_lock_wait_timeout = @prior_rocksdb_lock_wait_timeout;
set global rocksdb_deadlock_detect = @prior_rocksdb_deadlock_detect;
drop table t1;

View File

@@ -0,0 +1,8 @@
create table t1 (a int primary key, b int) engine=rocksdb;
set @prior_rocksdb_lock_wait_timeout = @@rocksdb_lock_wait_timeout;
set @prior_rocksdb_deadlock_detect = @@rocksdb_deadlock_detect;
set global rocksdb_lock_wait_timeout = 100000;
set global rocksdb_deadlock_detect = ON;
set global rocksdb_lock_wait_timeout = @prior_rocksdb_lock_wait_timeout;
set global rocksdb_deadlock_detect = @prior_rocksdb_deadlock_detect;
drop table t1;

View File

@@ -7,6 +7,7 @@ pk
1 1
connect con1,localhost,root,,; connect con1,localhost,root,,;
connection con1; connection con1;
call mtr.add_suppression("Got snapshot conflict errors");
### Connection con1 ### Connection con1
set @@rocksdb_lock_wait_timeout=500; set @@rocksdb_lock_wait_timeout=500;
set autocommit=0; set autocommit=0;

View File

@@ -0,0 +1,29 @@
DROP TABLE IF EXISTS t1, t2;
CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb;
INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'a');
CREATE TABLE t2 LIKE t1;
INSERT INTO t2 SELECT * FROM t1;
connect con1,localhost,root,,;
connect con2,localhost,root,,;
connection con1;
START TRANSACTION WITH CONSISTENT SNAPSHOT;
SAVEPOINT a;
SELECT * FROM t1 ORDER BY pk;
a b pk
1 a 1
2 b 2
3 a 3
ROLLBACK TO SAVEPOINT a;
SAVEPOINT a;
SELECT * FROM t2 ORDER BY pk;
a b pk
1 a 1
2 b 2
3 a 3
ROLLBACK TO SAVEPOINT a;
connection con2;
ALTER TABLE t1 RENAME TO t3;
connection default;
DROP TABLE t2, t3;
disconnect con1;
disconnect con2;

View File

@@ -13,11 +13,10 @@ SELECT a,b FROM t1 WHERE b='a';
a b a b
1 a 1 a
3 a 3 a
#
# Currently, SELECT ... LOCK IN SHARE MODE works like
# SELECT FOR UPDATE
SELECT a,b FROM t1 WHERE b='a' LOCK IN SHARE MODE; SELECT a,b FROM t1 WHERE b='a' LOCK IN SHARE MODE;
ERROR HY000: Lock wait timeout exceeded; try restarting transaction a b
1 a
3 a
UPDATE t1 SET b='c' WHERE b='a'; UPDATE t1 SET b='c' WHERE b='a';
ERROR HY000: Lock wait timeout exceeded; try restarting transaction ERROR HY000: Lock wait timeout exceeded; try restarting transaction
connection con1; connection con1;

View File

@@ -407,7 +407,8 @@ SNAPSHOTS
--------- ---------
LIST OF SNAPSHOTS FOR EACH SESSION: LIST OF SNAPSHOTS FOR EACH SESSION:
---SNAPSHOT, ACTIVE NUM sec ---SNAPSHOT, ACTIVE NUM sec
MySQL thread id TID, OS thread handle PTR MySQL thread id TID, OS thread handle PTR, query id QID localhost root ACTION
SHOW ENGINE rocksdb TRANSACTION STATUS
lock count 0, write count 0 lock count 0, write count 0
----------------------------------------- -----------------------------------------
END OF ROCKSDB TRANSACTION MONITOR OUTPUT END OF ROCKSDB TRANSACTION MONITOR OUTPUT

View File

@@ -21,8 +21,8 @@ index t3_1(b) comment 'rev:cf_t4'
) engine=rocksdb; ) engine=rocksdb;
SELECT table_name, table_rows FROM information_schema.tables WHERE table_schema = DATABASE() and table_name <> 't1'; SELECT table_name, table_rows FROM information_schema.tables WHERE table_schema = DATABASE() and table_name <> 't1';
table_name table_rows table_name table_rows
t2 4999 t2 1000
t3 4999 t3 1000
SELECT CASE WHEN table_rows < 100000 then 'true' else 'false' end from information_schema.tables where table_name = 't1'; SELECT CASE WHEN table_rows < 100000 then 'true' else 'false' end from information_schema.tables where table_name = 't1';
CASE WHEN table_rows < 100000 then 'true' else 'false' end CASE WHEN table_rows < 100000 then 'true' else 'false' end
true true

View File

@@ -0,0 +1,26 @@
# If rocksdb_tmpdir is NULL or "", temporary file will be created in
# server configuration variable location(--tmpdir)
CREATE TABLE t1 (a INT PRIMARY KEY, b INT) ENGINE=RocksDB;
show session variables like 'rocksdb_tmpdir';
Variable_name Value
rocksdb_tmpdir
# Connection con1
show session variables like 'rocksdb_tmpdir';
Variable_name Value
rocksdb_tmpdir
ALTER TABLE t1 ADD INDEX kb(b), ALGORITHM=INPLACE;
DROP TABLE t1;
# rocksdb_tmpdir with valid location.
CREATE TABLE t1 (a INT PRIMARY KEY, b INT) ENGINE=RocksDB;
set @tmpdir = @@global.tmpdir;
set global rocksdb_tmpdir = @tmpdir;
show session variables like 'rocksdb_tmpdir';
Variable_name Value
rocksdb_tmpdir
# Connection con3
show session variables like 'rocksdb_tmpdir';
Variable_name Value
rocksdb_tmpdir MYSQL_TMP_DIR/mysqld.1
ALTER TABLE t1 ADD INDEX kb(b), ALGORITHM=INPLACE;
set global rocksdb_tmpdir=NULL;
DROP TABLE t1;

View File

@@ -0,0 +1,13 @@
DROP TABLE IF EXISTS t1;
create table t1 (a int) engine=rocksdb;
insert into t1 values (1);
insert into t1 values (2);
set autocommit=0;
select * from t1 for update;
a
1
2
select * from information_schema.rocksdb_trx;
TRANSACTION_ID STATE NAME WRITE_COUNT LOCK_COUNT TIMEOUT_SEC WAITING_KEY WAITING_COLUMN_FAMILY_ID IS_REPLICATION SKIP_TRX_API READ_ONLY HAS_DEADLOCK_DETECTION NUM_ONGOING_BULKLOAD THREAD_ID QUERY
_TRX_ID_ STARTED _NAME_ 0 2 1 _KEY_ 0 0 0 0 0 0 _THREAD_ID_ select * from information_schema.rocksdb_trx
DROP TABLE t1;

View File

@@ -0,0 +1,15 @@
include/master-slave.inc
Warnings:
Note #### Sending passwords in plain text without SSL/TLS is extremely insecure.
Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information.
[connection master]
DROP TABLE IF EXISTS t1;
include/stop_slave.inc
create table t1 (a int) engine=rocksdb;
show variables like 'rocksdb_rpl_skip_tx_api';
Variable_name Value
rocksdb_rpl_skip_tx_api ON
include/start_slave.inc
found
DROP TABLE t1;
include/rpl_end.inc

View File

@@ -1,21 +1,3 @@
set session debug= '+d,MYROCKS_FORMAT_VERSION_INITIAL';
create table t (id int not null auto_increment primary key,
c varchar(1) CHARACTER SET latin1 COLLATE latin1_swedish_ci,
key sk (c));
set session debug= '-d,MYROCKS_FORMAT_VERSION_INITIAL';
insert into t (c) values ('A'), ('b'), ('C');
explain select c from t;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t index NULL sk 4 NULL # Using index
select c from t;
c
A
b
C
select c from t where c = 'a';
c
A
drop table t;
set session debug= "+d,myrocks_enable_unknown_collation_index_only_scans"; set session debug= "+d,myrocks_enable_unknown_collation_index_only_scans";
create table t (id int not null auto_increment primary key, create table t (id int not null auto_increment primary key,
c varchar(8) CHARACTER SET utf8 COLLATE utf8_general_ci, c varchar(8) CHARACTER SET utf8 COLLATE utf8_general_ci,
@@ -91,7 +73,7 @@ c1
Asdf Asdf
bbbb bbbb
drop table t; drop table t;
set session rocksdb_verify_checksums = on; set session rocksdb_verify_row_debug_checksums = on;
create table t (id int primary key, email varchar(100), KEY email_i (email(30))) engine=rocksdb default charset=latin1; create table t (id int primary key, email varchar(100), KEY email_i (email(30))) engine=rocksdb default charset=latin1;
insert into t values (1, ' a'); insert into t values (1, ' a');
explain select 'email_i' as index_name, count(*) AS count from t force index(email_i); explain select 'email_i' as index_name, count(*) AS count from t force index(email_i);

View File

@@ -1,81 +1,5 @@
drop table if exists t1, t2; drop table if exists t1, t2;
# #
# Check that DECIMAL PK
#
create table t0(a int);
insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
# First, make the server to create a dataset in the old format:
set session debug_dbug= '+d,MYROCKS_FORMAT_VERSION_INITIAL';
create table t1 (
pk1 decimal(32,16),
pk2 decimal(32,16),
pk3 decimal(32,16),
a smallint not null,
primary key(pk1, pk2, pk3)
);
insert into t1
select
A.a, B.a, C.a, 1234
from t0 A, t0 B, t0 C;
#
# Looking at the table size, one can tell that the data is stored using
# old format:
#
set global rocksdb_force_flush_memtable_now=1;
# Check the format version:
select table_name,index_name,kv_format_version
from information_schema.ROCKSDB_DDL
where TABLE_SCHEMA=database() AND table_name='t1';
table_name index_name kv_format_version
t1 PRIMARY 10
flush tables;
set session debug_dbug= '-d,MYROCKS_FORMAT_VERSION_INITIAL';
# Check that the new server reads the data in the old format:
select * from t1 order by pk1,pk2,pk3 limit 5;
pk1 pk2 pk3 a
0.0000000000000000 0.0000000000000000 0.0000000000000000 1234
0.0000000000000000 0.0000000000000000 1.0000000000000000 1234
0.0000000000000000 0.0000000000000000 2.0000000000000000 1234
0.0000000000000000 0.0000000000000000 3.0000000000000000 1234
0.0000000000000000 0.0000000000000000 4.0000000000000000 1234
#
# Ok, now, enable the new data format:
#
create table t2 (
pk1 decimal(32,16),
pk2 decimal(32,16),
pk3 decimal(32,16),
a smallint not null,
primary key(pk1, pk2, pk3)
);
insert into t2
select
A.a, B.a, C.a, 1234
from t0 A, t0 B, t0 C;
set global rocksdb_force_flush_memtable_now=1;
larger
1
# This should show the new PK data fromat
select table_name,index_name,kv_format_version from information_schema.ROCKSDB_DDL
where TABLE_SCHEMA=database() AND table_name='t2';
table_name index_name kv_format_version
t2 PRIMARY 11
#
# Check that the server is able to read BOTH the old and the new formats:
#
select * from t2 limit 3;
pk1 pk2 pk3 a
0.0000000000000000 0.0000000000000000 0.0000000000000000 1234
0.0000000000000000 0.0000000000000000 1.0000000000000000 1234
0.0000000000000000 0.0000000000000000 2.0000000000000000 1234
select * from t1 limit 3;
pk1 pk2 pk3 a
0.0000000000000000 0.0000000000000000 0.0000000000000000 1234
0.0000000000000000 0.0000000000000000 1.0000000000000000 1234
0.0000000000000000 0.0000000000000000 2.0000000000000000 1234
drop table t1,t2;
drop table t0;
#
# Check that DECIMAL datatype supports 'index-only' scans and is decoded correctly. # Check that DECIMAL datatype supports 'index-only' scans and is decoded correctly.
# (Decoding happens from the mem-comparable image in the index, regardless # (Decoding happens from the mem-comparable image in the index, regardless
# of whether the value part has original value or not) # of whether the value part has original value or not)

View File

@@ -727,9 +727,9 @@ index_name count
email_i 1 email_i 1
drop table t; drop table t;
set @save_rocksdb_checksums_pct = @@global.rocksdb_checksums_pct; set @save_rocksdb_checksums_pct = @@global.rocksdb_checksums_pct;
set @save_rocksdb_verify_checksums = @@session.rocksdb_verify_checksums; set @save_rocksdb_verify_row_debug_checksums = @@session.rocksdb_verify_row_debug_checksums;
set global rocksdb_checksums_pct = 100; set global rocksdb_checksums_pct = 100;
set session rocksdb_verify_checksums = on; set session rocksdb_verify_row_debug_checksums = on;
create table t (id int primary key, email varchar(100), KEY email_i (email(30))); create table t (id int primary key, email varchar(100), KEY email_i (email(30)));
insert into t values (1, 'a'); insert into t values (1, 'a');
explain select 'email_i' as index_name, count(*) AS count from t force index(email_i); explain select 'email_i' as index_name, count(*) AS count from t force index(email_i);
@@ -740,4 +740,4 @@ index_name count
email_i 1 email_i 1
drop table t; drop table t;
set global rocksdb_checksums_pct = @save_rocksdb_checksums_pct; set global rocksdb_checksums_pct = @save_rocksdb_checksums_pct;
set session rocksdb_verify_checksums = @save_rocksdb_verify_checksums; set session rocksdb_verify_row_debug_checksums = @save_rocksdb_verify_row_debug_checksums;

View File

@@ -1,254 +0,0 @@
drop table if exists t1,t2;
set session debug_dbug= "+d,myrocks_enable_unknown_collation_index_only_scans";
#
# Issue 257: Sort order for varchars is different between
# MyISAM/InnoDB vs MyRocks
#
create table t1 (
pk varchar(64) CHARACTER SET utf8 COLLATE utf8_general_ci,
col1 varchar(64),
primary key (pk)
);
insert into t1 values ('a','a');
insert into t1 values ('a ', 'a-space');
ERROR 23000: Duplicate entry 'a ' for key 'PRIMARY'
insert into t1 values('b ', 'b-2x-space');
insert into t1 values ('b', 'b');
ERROR 23000: Duplicate entry 'b' for key 'PRIMARY'
select pk, hex(pk), col1 from t1;
pk hex(pk) col1
a 61 a
b 622020 b-2x-space
insert into t1 values ('a\t', 'a-tab');
insert into t1 values ('a \t', 'a-space-tab');
select pk, hex(pk), col1 from t1 order by pk;
pk hex(pk) col1
a 6109 a-tab
a 612009 a-space-tab
a 61 a
b 622020 b-2x-space
# Try longer values
insert into t1 values (concat('a', repeat(' ',10)), 'a-10-x-space');
ERROR 23000: Duplicate entry 'a ' for key 'PRIMARY'
insert into t1 values (concat('c', repeat(' ',10)), 'c-10-x-space');
select * from t1;
pk col1
a a-tab
a a-space-tab
a a
b b-2x-space
c c-10-x-space
drop table t1;
# Secondary index
create table t1 (
pk int not null primary key,
col1 varchar(64) CHARACTER SET utf8 COLLATE utf8_general_ci,
col2 varchar(64),
key (col1)
);
insert into t1 values (0, 'ab', 'a-b');
insert into t1 values (1, 'a ', 'a-space');
insert into t1 values (2, 'a', 'a');
insert into t1 values (3, 'a \t', 'a-tab');
# Must show 'using index' for latin1_bin and utf8_bin:
explain
select col1, hex(col1) from t1;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 index NULL col1 195 NULL # Using index
select col1, hex(col1) from t1;
col1 hex(col1)
a 61202009
a 6120
a 61
ab 6162
# Must show 'using index' for latin1_bin and utf8_bin:
explain
select col1, hex(col1) from t1 where col1 < 'b';
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 # col1 col1 195 NULL # Using where; Using index
select col1, hex(col1) from t1 where col1 < 'b';
col1 hex(col1)
a 61202009
a 6120
a 61
ab 6162
delete from t1;
insert into t1 values(10, '', 'empty');
insert into t1 values(11, repeat(' ', 8), '8x-space');
insert into t1 values(12, repeat(' ', 16), '16x-space');
insert into t1 values(13, repeat(' ', 24), '24x-space');
insert into t1 values(14, concat(repeat(' ', 16),'a'), '16x-space-a');
insert into t1 values(21, repeat(' ', 9), '9x-space');
insert into t1 values(22, repeat(' ',17), '17x-space');
insert into t1 values(23, repeat(' ',18), '18x-space');
explain
select pk, col1, hex(col1), length(col1) from t1;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 # NULL col1 195 NULL # Using index
select pk, col1, hex(col1), length(col1) from t1;
pk col1 hex(col1) length(col1)
10 0
11 2020202020202020 8
12 20202020202020202020202020202020 16
13 202020202020202020202020202020202020202020202020 24
21 202020202020202020 9
22 2020202020202020202020202020202020 17
23 202020202020202020202020202020202020 18
14 a 2020202020202020202020202020202061 17
drop table t1;
create table t1 (pk int primary key, a varchar(512), key(a)) engine=rocksdb;
insert into t1 values (1, concat('a', repeat(' ', 300)));
insert into t1 values (2, concat('b', repeat(' ', 300)));
select pk,length(a) from t1 force index(a) where a < 'zz';
pk length(a)
1 301
2 301
select pk,length(a),rtrim(a) from t1 force index(a) where a < 'zz';
pk length(a) rtrim(a)
1 301 a
2 301 b
select pk,length(a),rtrim(a) from t1 ignore index(a) where a < 'zz';
pk length(a) rtrim(a)
1 301 a
2 301 b
drop table t1;
set session debug_dbug= "-d,myrocks_enable_unknown_collation_index_only_scans";
#
# Check backwards compatibility:
#
set session debug_dbug= '+d,MYROCKS_FORMAT_VERSION_INITIAL';
# Create the tables in the old format
create table t1 (
pk varchar(64) collate latin1_bin,
col1 varchar(64),
primary key (pk)
);
insert into t1 values ('a','a');
# The following will not produce an error:
insert into t1 values ('a ', 'a-space');
select pk, hex(pk), col1 from t1;
pk hex(pk) col1
a 61 a
a 6120 a-space
create table t2 (
pk int not null primary key,
col1 varchar(64) collate latin1_bin,
col2 varchar(64),
unique key (col1)
);
insert into t2 values (0, 'ab', 'a-b');
# The following will not produce an error:
insert into t2 values (1, 'a ', 'a-space');
insert into t2 values (2, 'a', 'a');
select pk, col1, hex(col1), col2 from t2;
pk col1 hex(col1) col2
0 ab 6162 a-b
1 a 6120 a-space
2 a 61 a
# Check the format version:
select table_name,index_name,kv_format_version
from information_schema.ROCKSDB_DDL
where TABLE_SCHEMA=database() AND table_name in ('t1','t2');
table_name index_name kv_format_version
t1 PRIMARY 10
t2 PRIMARY 10
t2 col1 10
flush tables;
set session debug_dbug= '-d,MYROCKS_FORMAT_VERSION_INITIAL';
select pk, hex(pk), col1 from t1;
pk hex(pk) col1
a 61 a
a 6120 a-space
select pk, col1, hex(col1), col2 from t2;
pk col1 hex(col1) col2
0 ab 6162 a-b
1 a 6120 a-space
2 a 61 a
select pk, hex(pk), col1 from t1;
pk hex(pk) col1
a 61 a
a 6120 a-space
select pk, col1, hex(col1), col2 from t2;
pk col1 hex(col1) col2
0 ab 6162 a-b
1 a 6120 a-space
2 a 61 a
drop table t1,t2;
#
# General upgrade tests to see that they work.
#
set session debug_dbug= '+d,MYROCKS_FORMAT_VERSION_INITIAL';
create table t2 (
id int primary key,
col1 varchar(64) collate latin1_swedish_ci,
unique key (col1)
) engine=rocksdb;
set session debug_dbug= '-d,MYROCKS_FORMAT_VERSION_INITIAL';
insert into t2 values (1, 'a');
insert into t2 values (2, 'b');
insert into t2 values (3, 'c');
insert into t2 values (4, 'c ');
select col1 from t2;
col1
a
b
c
c
delete from t2 where id = 4;
alter table t2 engine=rocksdb;
select col1 from t2;
col1
a
b
c
insert into t2 values (4, 'c ');
ERROR 23000: Duplicate entry 'c ' for key 'col1'
drop table t2;
set session debug_dbug= '+d,MYROCKS_FORMAT_VERSION_INITIAL';
create table t2 (
id int primary key,
col1 varchar(64) collate latin1_bin,
unique key (col1)
) engine=rocksdb;
set session debug_dbug= '-d,MYROCKS_FORMAT_VERSION_INITIAL';
insert into t2 values (1, 'a');
insert into t2 values (2, 'b');
insert into t2 values (3, 'c');
insert into t2 values (4, 'c ');
select col1 from t2;
col1
a
b
c
c
delete from t2 where id = 4;
alter table t2 engine=rocksdb;
select col1 from t2;
col1
a
b
c
insert into t2 values (4, 'c ');
ERROR 23000: Duplicate entry 'c ' for key 'col1'
drop table t2;
#
# Check what happens when one tries to 'upgrade' to the new data format
# and causes a unique key violation:
#
set session debug_dbug= '+d,MYROCKS_FORMAT_VERSION_INITIAL';
create table t2 (
pk int not null primary key,
col1 varchar(64) collate latin1_bin,
col2 varchar(64),
unique key (col1)
);
insert into t2 values (1, 'a ', 'a-space');
insert into t2 values (2, 'a', 'a');
select * from t2;
pk col1 col2
1 a a-space
2 a a
set session debug_dbug= '-d,MYROCKS_FORMAT_VERSION_INITIAL';
alter table t2 engine=rocksdb;
ERROR 23000: Duplicate entry 'a' for key 'col1'
drop table t2;

View File

@@ -0,0 +1,2 @@
RocksDB: Can't enable both use_direct_reads and allow_mmap_reads
RocksDB: Can't enable both use_direct_writes and allow_mmap_writes

View File

@@ -0,0 +1 @@
--binlog-format=row

View File

@@ -0,0 +1,64 @@
--source include/have_rocksdb.inc
--source include/have_log_bin.inc
--echo # Disable for valgrind because this takes too long
--source include/not_valgrind.inc
--disable_warnings
DROP DATABASE IF EXISTS mysqlslap;
--enable_warnings
CREATE DATABASE mysqlslap;
USE mysqlslap;
CREATE TABLE t1(id BIGINT AUTO_INCREMENT, value BIGINT, PRIMARY KEY(id)) ENGINE=rocksdb;
--echo # 2PC enabled, MyRocks durability enabled
SET GLOBAL rocksdb_disable_2pc=0;
SET GLOBAL rocksdb_write_sync=1;
--echo ## 2PC + durability + single thread
select variable_value into @c from information_schema.global_status where variable_name='rocksdb_wal_group_syncs';
--exec $MYSQL_SLAP --silent --concurrency=1 --number-of-queries=1000 --query="INSERT INTO t1 (id, value) VALUES(NULL, 1)"
select case when variable_value-@c = 1000 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_wal_group_syncs';
--echo ## 2PC + durability + group commit
select variable_value into @c from information_schema.global_status where variable_name='rocksdb_wal_group_syncs';
--exec $MYSQL_SLAP --silent --concurrency=50 --number-of-queries=10000 --query="INSERT INTO t1 (id, value) VALUES(NULL, 1)"
select case when variable_value-@c > 0 and variable_value-@c < 10000 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_wal_group_syncs';
--echo # 2PC enabled, MyRocks durability disabled
SET GLOBAL rocksdb_disable_2pc=0;
SET GLOBAL rocksdb_write_sync=0;
select variable_value into @c from information_schema.global_status where variable_name='rocksdb_wal_group_syncs';
--exec $MYSQL_SLAP --silent --concurrency=1 --number-of-queries=1000 --query="INSERT INTO t1 (id, value) VALUES(NULL, 1)"
select case when variable_value-@c = 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_wal_group_syncs';
select variable_value into @c from information_schema.global_status where variable_name='rocksdb_wal_group_syncs';
--exec $MYSQL_SLAP --silent --concurrency=50 --number-of-queries=10000 --query="INSERT INTO t1 (id, value) VALUES(NULL, 1)"
select case when variable_value-@c = 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_wal_group_syncs';
--echo # 2PC disabled, MyRocks durability enabled
SET GLOBAL rocksdb_disable_2pc=1;
SET GLOBAL rocksdb_write_sync=1;
select variable_value into @c from information_schema.global_status where variable_name='rocksdb_wal_group_syncs';
--exec $MYSQL_SLAP --silent --concurrency=1 --number-of-queries=1000 --query="INSERT INTO t1 (id, value) VALUES(NULL, 1)"
select case when variable_value-@c = 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_wal_group_syncs';
select variable_value into @c from information_schema.global_status where variable_name='rocksdb_wal_group_syncs';
--exec $MYSQL_SLAP --silent --concurrency=50 --number-of-queries=10000 --query="INSERT INTO t1 (id, value) VALUES(NULL, 1)"
select case when variable_value-@c = 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_wal_group_syncs';
SET GLOBAL rocksdb_disable_2pc=1;
SET GLOBAL rocksdb_write_sync=0;
DROP TABLE t1;
DROP DATABASE mysqlslap;

View File

@@ -1,5 +1,4 @@
--source include/have_rocksdb.inc --source include/have_rocksdb.inc
--source include/have_debug.inc
--source include/have_partition.inc --source include/have_partition.inc
--disable_warnings --disable_warnings
@@ -169,117 +168,6 @@ SELECT COUNT(*) FROM t1;
DROP TABLE t1; DROP TABLE t1;
#
# test crash recovery
#
CREATE TABLE t1 (a INT, b INT, KEY ka(a), KEY kab(a,b)) ENGINE=RocksDB;
INSERT INTO t1 (a, b) VALUES (1, 5);
INSERT INTO t1 (a, b) VALUES (2, 6);
INSERT INTO t1 (a, b) VALUES (3, 7);
--echo # crash_during_online_index_creation
flush logs;
--exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect
SET SESSION debug_dbug="+d,crash_during_online_index_creation";
--error 2013
ALTER TABLE t1 ADD INDEX kb(b), ALGORITHM=INPLACE;
--enable_reconnect
--source include/wait_until_connected_again.inc
SET SESSION debug_dbug="-d,crash_during_online_index_creation";
SHOW CREATE TABLE t1;
CHECK TABLE t1;
DROP TABLE t1;
#
# Test crash recovery with partitioned tables
#
CREATE TABLE t1 (i INT, j INT, k INT, PRIMARY KEY (i), KEY(j)) ENGINE = ROCKSDB PARTITION BY KEY(i) PARTITIONS 4;
--disable_query_log
let $max = 100;
let $i = 1;
while ($i <= $max) {
let $insert = INSERT INTO t1 VALUES ($i, $i, $i);
inc $i;
eval $insert;
}
--enable_query_log
--echo # crash_during_index_creation_partition
flush logs;
--exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect
SET SESSION debug_dbug="+d,crash_during_index_creation_partition";
--error 2013
ALTER TABLE t1 ADD INDEX kij(i,j), ALGORITHM=INPLACE;
--enable_reconnect
--source include/wait_until_connected_again.inc
SET SESSION debug_dbug="-d,crash_during_index_creation_partition";
SHOW CREATE TABLE t1;
# here, the index numbers should be higher because previously 4 index numbers
# were allocated for the partitioned table
ALTER TABLE t1 ADD INDEX kij(i,j), ALGORITHM=INPLACE;
SELECT * FROM t1 ORDER BY i LIMIT 10;
SELECT COUNT(*) FROM t1;
DROP TABLE t1;
#
# Test rollback on partitioned tables for inplace alter
#
CREATE TABLE t1 (i INT, j INT, k INT, PRIMARY KEY (i), KEY(j)) ENGINE = ROCKSDB PARTITION BY KEY(i) PARTITIONS 4;
--disable_query_log
let $max = 100;
let $i = 1;
while ($i <= $max) {
let $insert = INSERT INTO t1 VALUES ($i, $i, $i);
inc $i;
eval $insert;
}
--enable_query_log
--echo # crash_during_index_creation_partition
flush logs;
--exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect
SET SESSION debug_dbug="+d,myrocks_simulate_index_create_rollback";
--echo # expected assertion failure from sql layer here for alter rollback
call mtr.add_suppression("Assertion `0' failed.");
call mtr.add_suppression("Attempting backtrace. You can use the following information to find out");
--error 2013
ALTER TABLE t1 ADD INDEX kij(i,j), ALGORITHM=INPLACE;
--enable_reconnect
--source include/wait_until_connected_again.inc
SET SESSION debug_dbug="-d,myrocks_simulate_index_create_rollback";
SHOW CREATE TABLE t1;
# here, the index numbers should be higher because previously 4 index numbers
# were allocated for the partitioned table
ALTER TABLE t1 ADD INDEX kij(i,j), ALGORITHM=INPLACE;
SHOW CREATE TABLE t1;
SELECT COUNT(*) FROM t1;
DROP TABLE t1;
# test failure in prepare phase (due to collation) # test failure in prepare phase (due to collation)
set @tmp_rocksdb_strict_collation_check= @@rocksdb_strict_collation_check; set @tmp_rocksdb_strict_collation_check= @@rocksdb_strict_collation_check;
set global rocksdb_strict_collation_check=1; set global rocksdb_strict_collation_check=1;
@@ -289,6 +177,171 @@ CREATE TABLE t1 (a INT, b TEXT);
ALTER TABLE t1 ADD KEY kb(b(10)); ALTER TABLE t1 ADD KEY kb(b(10));
ALTER TABLE t1 ADD PRIMARY KEY(a); ALTER TABLE t1 ADD PRIMARY KEY(a);
DROP TABLE t1; DROP TABLE t1;
set global rocksdb_strict_collation_check= @tmp_rocksdb_strict_collation_check; set global rocksdb_strict_collation_check= @tmp_rocksdb_strict_collation_check;
# make sure race condition between connection close and alter on another
# connection is handled
set global rocksdb_bulk_load=1;
--echo # Establish connection con1 (user=root)
connect (con1,localhost,root,,);
--echo # Switch to connection con1
connection con1;
show global variables like 'rocksdb_bulk_load';
show session variables like 'rocksdb_bulk_load';
CREATE TABLE t1 (i INT, j INT, PRIMARY KEY (i)) ENGINE = ROCKSDB;
INSERT INTO t1 VALUES (1,1);
# Disconnect connection 1, this starts the code path that will call
# rocksdb_close_connection, ending the bulk load.
--echo # Disconnecting on con1
disconnect con1;
--echo # Establish connection con2 (user=root)
connect (con2,localhost,root,,);
--echo # Switch to connection con2
connection con2;
# when alter table happens, it tries to close all other TABLE instances
# when acquiring the exclusive lock for alter table (this happens in SQL layer)
# make sure bulk_load now handles this possible race condition properly
ALTER TABLE t1 ADD INDEX kj(j), ALGORITHM=INPLACE;
SELECT COUNT(*) FROM t1 FORCE INDEX(PRIMARY);
SELECT COUNT(*) FROM t1 FORCE INDEX(kj);
DROP TABLE t1;
disconnect con2;
# make sure implicilty closing the alter from another session works
--echo # Establish connection con1 (user=root)
connect (con1,localhost,root,,);
--echo # Establish connection con2 (user=root)
connect (con2,localhost,root,,);
--echo # Switch to connection con1
connection con1;
CREATE TABLE t1 (i INT, j INT, PRIMARY KEY (i)) ENGINE = ROCKSDB;
set rocksdb_bulk_load=1;
INSERT INTO t1 VALUES (1,1);
--echo # Switch to connection con2
connection con2;
# here, the bulk load hasn't been completed yet, and we are in conn2
# therefore select count returns 0
SELECT COUNT(*) FROM t1 FORCE INDEX(PRIMARY);
# implicilty close the table from connection 2
ALTER TABLE t1 ADD INDEX kj(j), ALGORITHM=INPLACE;
SELECT COUNT(*) FROM t1 FORCE INDEX(PRIMARY);
SELECT COUNT(*) FROM t1 FORCE INDEX(kj);
set global rocksdb_bulk_load=0;
DROP TABLE t1;
connection default;
SET @prior_rocksdb_merge_combine_read_size= @@rocksdb_merge_combine_read_size;
SET @prior_rocksdb_strict_collation_check= @@rocksdb_strict_collation_check;
SET @prior_rocksdb_merge_buf_size = @@rocksdb_merge_buf_size;
SET global rocksdb_strict_collation_check = off;
SET session rocksdb_merge_combine_read_size = 566;
SET session rocksdb_merge_buf_size = 336;
show variables like '%rocksdb_bulk_load%';
CREATE TABLE t1 (a VARCHAR(80)) ENGINE=RocksDB;
INSERT INTO t1 (a) VALUES (REPEAT("a", 80));
INSERT INTO t1 (a) VALUES (REPEAT("a", 80));
INSERT INTO t1 (a) VALUES (REPEAT("a", 80));
INSERT INTO t1 (a) VALUES (REPEAT("a", 80));
ALTER TABLE t1 ADD INDEX ka(a), ALGORITHM=INPLACE;
SHOW CREATE TABLE t1;
CHECK TABLE t1;
--sorted_result
SELECT * FROM t1 FORCE INDEX(ka) WHERE a > "";
DROP TABLE t1;
SET session rocksdb_merge_buf_size = @prior_rocksdb_merge_buf_size;
SET session rocksdb_merge_combine_read_size = @prior_rocksdb_merge_combine_read_size;
SET global rocksdb_strict_collation_check = @prior_rocksdb_strict_collation_check;
# Test to make sure index statistics are updating properly
CREATE TABLE t1 (i INT, j INT, PRIMARY KEY (i)) ENGINE = ROCKSDB;
--disable_query_log
let $max = 100;
let $i = 1;
while ($i <= $max) {
let $insert = INSERT INTO t1 VALUES ($i, $i);
inc $i;
eval $insert;
}
--enable_query_log
set global rocksdb_force_flush_memtable_now=1;
--let $data_length_old = query_get_value("select INDEX_LENGTH from information_schema.tables where table_schema=database() and table_name='t1'", INDEX_LENGTH, 1)
## uncomment to see the actual values
#--replace_column 8 #
#SHOW TABLE STATUS WHERE name LIKE 't1';
# Now do an alter and see what happens
ALTER TABLE t1 ADD INDEX kj(j), ALGORITHM=INPLACE;
--let $data_length_new = query_get_value("select INDEX_LENGTH from information_schema.tables where table_schema=database() and table_name='t1'", INDEX_LENGTH, 1)
--disable_query_log
--eval select $data_length_old < $data_length_new as "larger"
--source include/restart_mysqld.inc
--source include/wait_until_connected_again.inc
--let $data_length_new = query_get_value("select INDEX_LENGTH from information_schema.tables where table_schema=database() and table_name='t1'", INDEX_LENGTH, 1)
--disable_query_log
--eval select $data_length_old < $data_length_new as "larger"
analyze table t1;
--let $data_length_new = query_get_value("select INDEX_LENGTH from information_schema.tables where table_schema=database() and table_name='t1'", INDEX_LENGTH, 1)
--disable_query_log
--eval select $data_length_old < $data_length_new as "larger"
--source include/restart_mysqld.inc
--source include/wait_until_connected_again.inc
--let $data_length_new = query_get_value("select INDEX_LENGTH from information_schema.tables where table_schema=database() and table_name='t1'", INDEX_LENGTH, 1)
--disable_query_log
--eval select $data_length_old < $data_length_new as "larger"
# verifying multiple analyze table won't change stats
--disable_query_log
let $max = 10;
let $i = 1;
while ($i <= $max) {
let $analyze = ANALYZE TABLE t1;
inc $i;
eval $analyze;
}
--enable_query_log
--let $data_length_new2 = query_get_value("select INDEX_LENGTH from information_schema.tables where table_schema=database() and table_name='t1'", INDEX_LENGTH, 1)
--eval select $data_length_new2 < $data_length_new * 1.5 as "same"
--enable_query_log
## uncomment to see the actual values
#--replace_column 8 #
#SHOW TABLE STATUS WHERE name LIKE 't1';
DROP TABLE t1;

View File

@@ -0,0 +1,117 @@
--source include/have_rocksdb.inc
--source include/have_debug.inc
--disable_warnings
drop table if exists t1;
--enable_warnings
#
# test crash recovery
#
CREATE TABLE t1 (a INT, b INT, KEY ka(a), KEY kab(a,b)) ENGINE=RocksDB;
INSERT INTO t1 (a, b) VALUES (1, 5);
INSERT INTO t1 (a, b) VALUES (2, 6);
INSERT INTO t1 (a, b) VALUES (3, 7);
--echo # crash_during_online_index_creation
flush logs;
--exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect
SET SESSION debug="+d,crash_during_online_index_creation";
--error 2013
ALTER TABLE t1 ADD INDEX kb(b), ALGORITHM=INPLACE;
--enable_reconnect
--source include/wait_until_connected_again.inc
SET SESSION debug="-d,crash_during_online_index_creation";
SHOW CREATE TABLE t1;
CHECK TABLE t1;
DROP TABLE t1;
#
# Test crash recovery with partitioned tables
#
CREATE TABLE t1 (i INT, j INT, k INT, PRIMARY KEY (i), KEY(j)) ENGINE = ROCKSDB PARTITION BY KEY(i) PARTITIONS 4;
--disable_query_log
let $max = 100;
let $i = 1;
while ($i <= $max) {
let $insert = INSERT INTO t1 VALUES ($i, $i, $i);
inc $i;
eval $insert;
}
--enable_query_log
--echo # crash_during_index_creation_partition
flush logs;
--exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect
SET SESSION debug="+d,crash_during_index_creation_partition";
--error 2013
ALTER TABLE t1 ADD INDEX kij(i,j), ALGORITHM=INPLACE;
--enable_reconnect
--source include/wait_until_connected_again.inc
SET SESSION debug="-d,crash_during_index_creation_partition";
SHOW CREATE TABLE t1;
# here, the index numbers should be higher because previously 4 index numbers
# were allocated for the partitioned table
ALTER TABLE t1 ADD INDEX kij(i,j), ALGORITHM=INPLACE;
SELECT * FROM t1 ORDER BY i LIMIT 10;
SELECT COUNT(*) FROM t1;
DROP TABLE t1;
#
# Test rollback on partitioned tables for inplace alter
#
CREATE TABLE t1 (i INT, j INT, k INT, PRIMARY KEY (i), KEY(j)) ENGINE = ROCKSDB PARTITION BY KEY(i) PARTITIONS 4;
--disable_query_log
let $max = 100;
let $i = 1;
while ($i <= $max) {
let $insert = INSERT INTO t1 VALUES ($i, $i, $i);
inc $i;
eval $insert;
}
--enable_query_log
--echo # crash_during_index_creation_partition
flush logs;
--exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect
SET SESSION debug="+d,myrocks_simulate_index_create_rollback";
--echo # expected assertion failure from sql layer here for alter rollback
call mtr.add_suppression("Assertion `0' failed.");
call mtr.add_suppression("Attempting backtrace. You can use the following information to find out");
--error 2013
ALTER TABLE t1 ADD INDEX kij(i,j), ALGORITHM=INPLACE;
--enable_reconnect
--source include/wait_until_connected_again.inc
SET SESSION debug="-d,myrocks_simulate_index_create_rollback";
SHOW CREATE TABLE t1;
# here, the index numbers should be higher because previously 4 index numbers
# were allocated for the partitioned table
ALTER TABLE t1 ADD INDEX kij(i,j), ALGORITHM=INPLACE;
SHOW CREATE TABLE t1;
SELECT COUNT(*) FROM t1;
DROP TABLE t1;

View File

@@ -1,5 +1,4 @@
--source include/have_rocksdb.inc --source include/have_rocksdb.inc
--source include/have_debug.inc
--disable_warnings --disable_warnings
drop table if exists t1; drop table if exists t1;

View File

@@ -1,30 +0,0 @@
--source include/have_rocksdb.inc
# Issue221
# Turning on --rocksdb-allow-mmap-reads while having --rocksdb-allow-os-buffer
# off caused an assertion in RocksDB. Now it should not be allowed and the
# server will not start with that configuration
# Write file to make mysql-test-run.pl expect the "crash", but don't restart
# the server until it is told to
--let $_server_id= `SELECT @@server_id`
--let $_expect_file_name= $MYSQLTEST_VARDIR/tmp/mysqld.$_server_id.expect
--exec echo "wait" >$_expect_file_name
shutdown_server 10;
# Clear the log
--exec echo "" >$MYSQLTEST_VARDIR/log/mysqld.1.err
# Attempt to restart the server with invalid options
--exec echo "restart:--rocksdb_allow_os_buffer=0 --rocksdb_allow_mmap_reads=1" >$_expect_file_name
--sleep 0.1 # Wait 100ms - that is how long the sleep is in check_expected_crash_and_restart
--exec echo "restart:" >$_expect_file_name
# Cleanup
--enable_reconnect
--source include/wait_until_connected_again.inc
--disable_reconnect
# We should now have an error message
--exec grep "disable allow_os_buffer" $MYSQLTEST_VARDIR/log/mysqld.1.err | cut -d] -f2

View File

@@ -0,0 +1,53 @@
--source include/have_rocksdb.inc
--source include/have_debug_sync.inc
--echo #---------------------------
--echo # two threads inserting simultaneously with increment > 1
--echo # Issue #390
--echo #---------------------------
CREATE TABLE t1 (a INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb;
# Set up connections
connect (con1, localhost, root,,);
SET auto_increment_increment = 2;
SET auto_increment_offset = 1;
# Insert one row to set up the conditions that caused the original failure
INSERT INTO t1 VALUES(NULL);
connect (con2, localhost, root,,);
SET auto_increment_increment = 2;
SET auto_increment_offset = 1;
# Start each thread on an insert that will block waiting for a signal
connection con1;
SET debug_sync='rocksdb.autoinc_vars SIGNAL parked1 WAIT_FOR go NO_CLEAR_EVENT';
send INSERT INTO t1 VALUES(NULL);
connection con2;
SET debug_sync='rocksdb.autoinc_vars SIGNAL parked2 WAIT_FOR go NO_CLEAR_EVENT';
send INSERT INTO t1 VALUES(NULL);
# Wait for both threads to be at debug_sync point
connection default;
SET debug_sync='now WAIT_FOR parked1';
SET debug_sync='now WAIT_FOR parked2';
# Signal both threads to continue
SET debug_sync='now SIGNAL go';
connection con1;
reap;
connection con2;
reap;
connection default;
SET debug_sync='RESET';
disconnect con1;
disconnect con2;
SELECT * FROM t1;
DROP TABLE t1;

View File

@@ -0,0 +1,141 @@
--source include/have_rocksdb.inc
--echo #---------------------------
--echo # ten threads inserting simultaneously with increment > 1
--echo # Issue #390
--echo #---------------------------
# Run 10 simultaneous threads each inserting 100,000 rows
let $num_threads = 10;
let $num_rows_per_thread = 100000;
# Create the table with an AUTO_INCREMENT primary key and a separate column
# to store which thread created the row
CREATE TABLE t1 (pk INT AUTO_INCREMENT PRIMARY KEY, thr INT) ENGINE=rocksdb;
# For each thread...
# 1) set up a connection
# 2) create a file that can be used for LOAD DATA INFILE ...
let $i = `SELECT $num_threads`;
while ($i > 0)
{
dec $i;
# Set up connection
connect (con$i, localhost, root,,);
# Set up the auto_increment_* variables for each thread
eval SET auto_increment_increment = 100;
eval SET auto_increment_offset = $i + 1;
let $file = `SELECT CONCAT(@@datadir, "test_insert_", $i, ".txt")`;
# Pass variables into perl
let ROCKSDB_INFILE = $file;
let ROCKSDB_THREAD = `SELECT $i`;
let ROCKSDB_ROWS_PER_THREAD = `SELECT $num_rows_per_thread`;
# Create a file to load
perl;
my $fn = $ENV{'ROCKSDB_INFILE'};
my $thr = $ENV{'ROCKSDB_THREAD'};
my $num = $ENV{'ROCKSDB_ROWS_PER_THREAD'};
open(my $fh, '>>', $fn) || die "perl open($fn): $!";
for (my $ii = 0; $ii < $num; $ii++)
{
print $fh "\\N\t$thr\n"
}
close($fh);
EOF
}
# For each connection start the LOAD DATA INFILE in the background
connection default;
let $i = `SELECT $num_threads`;
while ($i > 0)
{
dec $i;
connection con$i;
let $file = `SELECT CONCAT(@@datadir, "test_insert_", $i, ".txt")`;
--disable_query_log
--echo LOAD DATA INFILE <input_file> INTO TABLE t1;
send_eval LOAD DATA INFILE '$file' INTO TABLE t1;
--enable_query_log
}
# Reap each connection's background result
connection default;
let $i = `SELECT $num_threads`;
while ($i > 0)
{
dec $i;
connection con$i;
reap;
}
# Make sure we have the required number of rows
connection default;
SELECT COUNT(*) FROM t1;
SELECT thr, COUNT(pk) FROM t1 GROUP BY thr;
# Cleanup the connection and file used for LOAD DATA INFILE
let $i = `SELECT $num_threads`;
while ($i > 0)
{
dec $i;
disconnect con$i;
let $file = `SELECT CONCAT(@@datadir, "test_insert_", "$i", ".txt")`;
remove_file $file;
}
# Validate each row. For each row, the created 'thr' column shows which
# thread created the row. The pk that was automatically generated should
# therefore match a certain pattern. For thread 0, the pk should be in
# the sequence [1, 101, 201, 301, ...]; for thread 1, it should be in the
# sequence [2, 102, 202, 302, ...], etc. The pk for each row should be
# smallest value in the sequence for thread 'thr' that is greater than
# the pk in the previous row.
let $file = `SELECT CONCAT(@@datadir, "test_export.txt")`;
--disable_query_log
--echo SELECT * FROM t1 ORDER BY pk INTO OUTFILE <output_file>;
eval SELECT * FROM t1 ORDER BY pk INTO OUTFILE "$file";
--enable_query_log
let ROCKSDB_OUTFILE = $file;
perl;
my $fn = $ENV{'ROCKSDB_OUTFILE'};
my $last_pk = 0;
open(my $fh, '<', $fn) || die "perl open($fn): $!";
while (<$fh>)
{
if ($_ =~ m/^(.*)\t(.*)$/)
{
my $pk = $1;
my $thr = $2;
my $expected_pk = int($last_pk / 100) * 100 + ($thr + 1);
$expected_pk += 100 if $expected_pk <= $last_pk;
if ($expected_pk != $pk)
{
die "Incorrect next pk ($pk); expected $expected_pk (previous: $last_pk)"
}
$last_pk = $pk;
}
else
{
die "output file has incorrect format: $_";
}
}
print stdout "All pk values matched their expected values\n";
EOF
remove_file $file;
# Drop the table to finally clean up
DROP TABLE t1;

View File

@@ -74,6 +74,15 @@ eval LOAD DATA INFILE '$file' INTO TABLE t3;
--enable_query_log --enable_query_log
set rocksdb_bulk_load=0; set rocksdb_bulk_load=0;
# Make sure row count index stats are correct
--replace_column 6 # 7 # 8 # 9 #
SHOW TABLE STATUS WHERE name LIKE 't%';
ANALYZE TABLE t1, t2, t3;
--replace_column 6 # 7 # 8 # 9 #
SHOW TABLE STATUS WHERE name LIKE 't%';
# Make sure all the data is there. # Make sure all the data is there.
select count(pk) from t1; select count(pk) from t1;
select count(a) from t1; select count(a) from t1;

View File

@@ -157,7 +157,7 @@ DROP TABLE t2;
# test invalid regex (missing end bracket) # test invalid regex (missing end bracket)
--exec echo "" >$MYSQLTEST_VARDIR/log/mysqld.1.err --exec echo "" >$MYSQLTEST_VARDIR/log/mysqld.1.err
SET GLOBAL rocksdb_strict_collation_exceptions="[a-b"; SET GLOBAL rocksdb_strict_collation_exceptions="[a-b";
--exec grep -A 1 "Invalid pattern" $MYSQLTEST_VARDIR/log/mysqld.1.err | cut -d] -f2 --exec grep "Invalid pattern" $MYSQLTEST_VARDIR/log/mysqld.1.err | cut -d] -f2
--error ER_UNKNOWN_ERROR --error ER_UNKNOWN_ERROR
CREATE TABLE a (id INT PRIMARY KEY, value varchar(50), index(value)) engine=rocksdb charset utf8; CREATE TABLE a (id INT PRIMARY KEY, value varchar(50), index(value)) engine=rocksdb charset utf8;
SET GLOBAL rocksdb_strict_collation_exceptions="[a-b]"; SET GLOBAL rocksdb_strict_collation_exceptions="[a-b]";
@@ -171,7 +171,7 @@ call mtr.add_suppression("Invalid pattern in strict_collation_exceptions:");
# test invalid regex (trailing escape) # test invalid regex (trailing escape)
--exec echo "" >$MYSQLTEST_VARDIR/log/mysqld.1.err --exec echo "" >$MYSQLTEST_VARDIR/log/mysqld.1.err
SET GLOBAL rocksdb_strict_collation_exceptions="abc\\"; SET GLOBAL rocksdb_strict_collation_exceptions="abc\\";
--exec grep -A 1 "Invalid pattern" $MYSQLTEST_VARDIR/log/mysqld.1.err | cut -d] -f2 --exec grep "Invalid pattern" $MYSQLTEST_VARDIR/log/mysqld.1.err | cut -d] -f2
--error ER_UNKNOWN_ERROR --error ER_UNKNOWN_ERROR
CREATE TABLE abc (id INT PRIMARY KEY, value varchar(50), index(value)) engine=rocksdb charset utf8; CREATE TABLE abc (id INT PRIMARY KEY, value varchar(50), index(value)) engine=rocksdb charset utf8;
SET GLOBAL rocksdb_strict_collation_exceptions="abc"; SET GLOBAL rocksdb_strict_collation_exceptions="abc";

View File

@@ -0,0 +1,27 @@
--source include/have_rocksdb.inc
--disable_warnings
DROP TABLE IF EXISTS a;
--enable_warnings
create table a (id int, value int, primary key (id) comment 'cf_a') engine=rocksdb;
--disable_query_log
let $i = 1;
while ($i <= 100000) {
let $insert = INSERT INTO a VALUES($i, $i);
inc $i;
eval $insert;
}
--enable_query_log
set rocksdb_bulk_load=1;
set rocksdb_commit_in_the_middle=1;
alter table a add index v (value) COMMENT 'cf_a';
set rocksdb_bulk_load=0;
set rocksdb_commit_in_the_middle=0;
select count(*) from a force index(primary);
select count(*) from a force index(v);
DROP TABLE a;

View File

@@ -21,13 +21,13 @@ insert into t1 values (1,1),(2,2),(3,3);
select * from t1; select * from t1;
set @tmp1=@@rocksdb_verify_checksums; set @tmp1=@@rocksdb_verify_row_debug_checksums;
set rocksdb_verify_checksums=1; set rocksdb_verify_row_debug_checksums=1;
set session debug_dbug= "+d,myrocks_simulate_bad_row_read1"; set session debug_dbug= "+d,myrocks_simulate_bad_row_read1";
--error ER_GET_ERRNO --error ER_GET_ERRNO
select * from t1 where pk=1; select * from t1 where pk=1;
set session debug_dbug= "-d,myrocks_simulate_bad_row_read1"; set session debug_dbug= "-d,myrocks_simulate_bad_row_read1";
set rocksdb_verify_checksums=@tmp1; set rocksdb_verify_row_debug_checksums=@tmp1;
select * from t1 where pk=1; select * from t1 where pk=1;

View File

@@ -7,3 +7,5 @@ level_serializable: Not supported
slow_query_log: MDEV-11480 slow_query_log: MDEV-11480
select_for_update_skip_locked_nowait: MDEV-11481 select_for_update_skip_locked_nowait: MDEV-11481
rpl_read_free: MDEV-10976 rpl_read_free: MDEV-10976
optimizer_loose_index_scans: MariaDB doesn't support Skip Scan

View File

@@ -20,4 +20,25 @@
--source index.inc --source index.inc
--echo #
--echo # Issue #376: MyRocks: ORDER BY optimizer is unable to use the index extension
--echo #
create table t0 (a int);
insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
create table t1(a int);
insert into t1 select A.a + B.a* 10 + C.a * 100 from t0 A, t0 B, t0 C;
create table t2 (
pk int not null,
a int not null,
b int not null,
primary key(pk),
key(a)
) engine=rocksdb;
insert into t2 select A.a, FLOOR(A.a/10), A.a from t1 A;
--echo # This must have type=range, index=a, and must not have 'Using filesort':
--replace_column 9 #
explain select * from t2 force index (a) where a=0 and pk>=3 order by pk;
drop table t0,t1,t2;

View File

@@ -3,4 +3,5 @@
--rocksdb_override_cf_options=__system__={memtable=skip_list:16} --rocksdb_override_cf_options=__system__={memtable=skip_list:16}
--rocksdb_compaction_sequential_deletes=0 --rocksdb_compaction_sequential_deletes=0
--rocksdb_compaction_sequential_deletes_window=0 --rocksdb_compaction_sequential_deletes_window=0
--rocksdb_allow_concurrent_memtable_write=0

View File

@@ -0,0 +1,31 @@
--source include/have_rocksdb.inc
--disable_warnings
DROP TABLE IF EXISTS t1;
DROP TABLE IF EXISTS t2;
--enable_warnings
create table t1 (a int, primary key(a) comment 'lock_into_cf1') engine=rocksdb;
insert into t1 values (1);
insert into t1 values (2);
create table t2 (a int, primary key(a) comment 'lock_info_cf2') engine=rocksdb;
insert into t2 values (1);
insert into t2 values (2);
set autocommit=0;
select * from t1 for update;
select * from t2 for update;
use information_schema;
--replace_column 2 _txn_id_ 3 _key_
select rocksdb_ddl.cf, rocksdb_locks.transaction_id, rocksdb_locks.key
from rocksdb_locks
left join rocksdb_ddl
on rocksdb_locks.column_family_id=rocksdb_ddl.column_family
order by rocksdb_ddl.cf;
use test;
DROP TABLE t1;
DROP TABLE t2;

View File

@@ -0,0 +1,3 @@
let $engine=rocksdb;
--source include/loose_index_scans.inc

View File

@@ -823,13 +823,17 @@ update t49 set a = 100 where pk = 1;
--connect (con1,localhost,root,,) --connect (con1,localhost,root,,)
--let $con1_id = `SELECT CONNECTION_ID()` --let $con1_id = `SELECT CONNECTION_ID()`
set rocksdb_lock_wait_timeout=5000; set rocksdb_lock_wait_timeout=60;
set @var1= to_seconds(now()); set @var1= to_seconds(now());
send update t49 set a = 1000 where pk = 1; send update t49 set a = 1000 where pk = 1;
--connect (con2,localhost,root,,) --connect (con2,localhost,root,,)
--echo kill query \$con1_id; --echo kill query \$con1_id;
--disable_query_log --disable_query_log
# If we immeditely kill the query - internally the condition broadcast can
# occur before the lock is waiting on the condition, thus the broadcast call
# is lost. Sleep 1 second to avoid this condition.
--sleep 1
eval kill query $con1_id; eval kill query $con1_id;
--enable_query_log --enable_query_log
--connection con1 --connection con1
@@ -838,10 +842,8 @@ eval kill query $con1_id;
set @var2= to_seconds(now()); set @var2= to_seconds(now());
# We expect the time to kill query in con1 should be below # We expect the time to kill query in con1 should be below
# rocksdb_lock_wait_timeout (5000). # rocksdb_lock_wait_timeout (60).
--echo "[Jay Edgar] I've updated this query to help determine why it is sometimes failing" select if ((@var2 - @var1) < 60, "passed", (@var2 - @var1)) as 'result';
--echo "(t13541934). If you get an error here (i.e. not 'passed') notify me."
select if ((@var2 - @var1) < 1000, "passed", (@var2 - @var1)) as 'result';
--connection default --connection default
--disconnect con1 --disconnect con1
@@ -1124,7 +1126,7 @@ set @a=-1;
insert into t1 select (@a:=@a+1), 1234 from information_schema.session_variables limit 100; insert into t1 select (@a:=@a+1), 1234 from information_schema.session_variables limit 100;
set @tmp1= @@rocksdb_max_row_locks; set @tmp1= @@rocksdb_max_row_locks;
set rocksdb_max_row_locks= 20; set rocksdb_max_row_locks= 20;
--error ER_INTERNAL_ERROR --error ER_GET_ERRMSG
update t1 set a=a+10; update t1 set a=a+10;
DROP TABLE t1; DROP TABLE t1;
@@ -1929,4 +1931,42 @@ SHOW TABLE STATUS LIKE 't1';
SELECT * FROM t1; SELECT * FROM t1;
DROP TABLE t1; DROP TABLE t1;
--echo #
--echo # Issue #411: Setting rocksdb_commit_in_the_middle commits transaction
--echo # without releasing iterator
--echo #
CREATE TABLE t1 (id1 bigint(20),
id2 bigint(20),
id3 bigint(20),
PRIMARY KEY (id1, id2, id3))
DEFAULT CHARSET=latin1;
CREATE TABLE t2 (id1 bigint(20),
id2 bigint(20),
PRIMARY KEY (id1, id2))
DEFAULT CHARSET=latin1;
set rocksdb_commit_in_the_middle=1;
SET @save_rocksdb_bulk_load_size= @@rocksdb_bulk_load_size;
set rocksdb_bulk_load_size = 100;
--disable_query_log
let $j = 10000;
while ($j)
{
--eval insert into t1 (id1, id2, id3) values (0, $j, 0);
--eval insert into t2 (id1, id2) values (0, $j);
dec $j;
}
--enable_query_log
DELETE t2, t1 FROM t2 LEFT JOIN t1 ON t2.id2 = t1.id2 AND t2.id1 = t1.id1 WHERE t2.id1 = 0;
SET rocksdb_bulk_load_size= @save_rocksdb_bulk_load_size;
SET rocksdb_commit_in_the_middle=0;
DROP TABLE t1, t2;
SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = @ORIG_PAUSE_BACKGROUND_WORK; SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = @ORIG_PAUSE_BACKGROUND_WORK;

View File

@@ -5,8 +5,8 @@
# #
--source include/have_debug.inc --source include/have_debug.inc
set @save_rocksdb_store_checksums=@@global.rocksdb_store_checksums; set @save_rocksdb_store_row_debug_checksums=@@global.rocksdb_store_row_debug_checksums;
set @save_rocksdb_verify_checksums=@@global.rocksdb_verify_checksums; set @save_rocksdb_verify_row_debug_checksums=@@global.rocksdb_verify_row_debug_checksums;
set @save_rocksdb_checksums_pct=@@global.rocksdb_checksums_pct; set @save_rocksdb_checksums_pct=@@global.rocksdb_checksums_pct;
# wiping mysql log for repeatable tests # wiping mysql log for repeatable tests
@@ -26,7 +26,7 @@ check table t1;
drop table t1; drop table t1;
set session rocksdb_store_checksums=on; set session rocksdb_store_row_debug_checksums=on;
create table t2 (pk int primary key, a int, b int, key(a), key(b)) engine=rocksdb; create table t2 (pk int primary key, a int, b int, key(a), key(b)) engine=rocksdb;
insert into t2 values (1,1,1),(2,2,2),(3,3,3); insert into t2 values (1,1,1),(2,2,2),(3,3,3);
check table t2; check table t2;
@@ -35,13 +35,13 @@ check table t2;
--echo # Now, make a table that has both rows with checksums and without --echo # Now, make a table that has both rows with checksums and without
create table t3 (pk int primary key, a int, b int, key(a), key(b)) engine=rocksdb; create table t3 (pk int primary key, a int, b int, key(a), key(b)) engine=rocksdb;
insert into t3 values (1,1,1),(2,2,2),(3,3,3); insert into t3 values (1,1,1),(2,2,2),(3,3,3);
set session rocksdb_store_checksums=off; set session rocksdb_store_row_debug_checksums=off;
update t3 set b=3 where a=2; update t3 set b=3 where a=2;
set session rocksdb_store_checksums=on; set session rocksdb_store_row_debug_checksums=on;
check table t3; check table t3;
--exec grep "^[0-9-]* \?[0-9:]* [0-9]* \[Note\] CHECKTABLE t3" $MYSQLTEST_VARDIR/log/mysqld.1.err | cut -d] -f2 --exec grep "^[0-9-]* \?[0-9:]* [0-9]* \[Note\] CHECKTABLE t3" $MYSQLTEST_VARDIR/log/mysqld.1.err | cut -d] -f2
set session rocksdb_store_checksums=on; set session rocksdb_store_row_debug_checksums=on;
set session rocksdb_checksums_pct=5; set session rocksdb_checksums_pct=5;
create table t4 (pk int primary key, a int, b int, key(a), key(b)) engine=rocksdb; create table t4 (pk int primary key, a int, b int, key(a), key(b)) engine=rocksdb;
--disable_query_log --disable_query_log
@@ -71,9 +71,9 @@ insert into mtr.test_suppressions values
--echo # 1. Start with mismatch in key checksum of the PK. --echo # 1. Start with mismatch in key checksum of the PK.
set session debug_dbug= "+d,myrocks_simulate_bad_pk_checksum1"; set session debug_dbug= "+d,myrocks_simulate_bad_pk_checksum1";
set session rocksdb_verify_checksums=off; set session rocksdb_verify_row_debug_checksums=off;
select * from t3; select * from t3;
set session rocksdb_verify_checksums=on; set session rocksdb_verify_row_debug_checksums=on;
--error ER_INTERNAL_ERROR --error ER_INTERNAL_ERROR
select * from t3; select * from t3;
--error ER_INTERNAL_ERROR --error ER_INTERNAL_ERROR
@@ -82,9 +82,9 @@ set session debug_dbug= "-d,myrocks_simulate_bad_pk_checksum1";
--echo # 2. Continue with mismatch in pk value checksum. --echo # 2. Continue with mismatch in pk value checksum.
set session debug_dbug= "+d,myrocks_simulate_bad_pk_checksum2"; set session debug_dbug= "+d,myrocks_simulate_bad_pk_checksum2";
set session rocksdb_verify_checksums=off; set session rocksdb_verify_row_debug_checksums=off;
select * from t3; select * from t3;
set session rocksdb_verify_checksums=on; set session rocksdb_verify_row_debug_checksums=on;
--error ER_INTERNAL_ERROR --error ER_INTERNAL_ERROR
select * from t3; select * from t3;
--error ER_INTERNAL_ERROR --error ER_INTERNAL_ERROR
@@ -105,6 +105,9 @@ select * from t4 force index(a) where a<1000000;
set session debug_dbug= "-d,myrocks_simulate_bad_key_checksum1"; set session debug_dbug= "-d,myrocks_simulate_bad_key_checksum1";
--echo # 4. The same for index-only reads? --echo # 4. The same for index-only reads?
--disable_query_log
set global rocksdb_force_flush_memtable_now=1;
--enable_query_log
--replace_column 9 # --replace_column 9 #
explain explain
select a from t3 force index(a) where a<4; select a from t3 force index(a) where a<4;
@@ -117,8 +120,8 @@ select a from t3 force index(a) where a<4;
select a from t4 force index(a) where a<1000000; select a from t4 force index(a) where a<1000000;
set session debug_dbug= "-d,myrocks_simulate_bad_key_checksum1"; set session debug_dbug= "-d,myrocks_simulate_bad_key_checksum1";
set @@global.rocksdb_store_checksums=@save_rocksdb_store_checksums; set @@global.rocksdb_store_row_debug_checksums=@save_rocksdb_store_row_debug_checksums;
set @@global.rocksdb_verify_checksums=@save_rocksdb_verify_checksums; set @@global.rocksdb_verify_row_debug_checksums=@save_rocksdb_verify_row_debug_checksums;
set @@global.rocksdb_checksums_pct=@save_rocksdb_checksums_pct; set @@global.rocksdb_checksums_pct=@save_rocksdb_checksums_pct;
drop table t2,t3,t4; drop table t2,t3,t4;

View File

@@ -0,0 +1,90 @@
#
# Some basic sanity tests for deadlock detection.
#
--source include/have_rocksdb.inc
set @prior_rocksdb_lock_wait_timeout = @@rocksdb_lock_wait_timeout;
set @prior_rocksdb_deadlock_detect = @@rocksdb_deadlock_detect;
set global rocksdb_lock_wait_timeout = 100000;
set global rocksdb_deadlock_detect = ON;
create table t (i int primary key);
create table r1 (id int primary key, value int);
insert into r1 values (1,1),(2,2),(3,3),(4,4),(5,5),(6,6),(7,7),(8,8),(9,9),(10,10);
create table r2 like r1;
insert into r2 select * from r1;
# deadlock on scanned locking reads
connect (con1,localhost,root,,);
let $con1= `SELECT CONNECTION_ID()`;
begin;
update r2 set value=100 where id=9;
connect (con2,localhost,root,,);
let $con2= `SELECT CONNECTION_ID()`;
begin;
update r1 set value=100 where id=8;
--send select * from r2 for update;
connection con1;
let $wait_condition =
`SELECT CONCAT('select count(*) = 1 from information_schema.rocksdb_trx where THREAD_ID = ', '$con2', ' and WAITING_KEY != ""')`;
--source include/wait_condition.inc
--error ER_LOCK_DEADLOCK
select * from r1 for update;
rollback;
connection con2;
--reap;
rollback;
connection con1;
begin;
insert into t values (1);
connection con2;
begin;
insert into t values (2);
connect (con3,localhost,root,,);
begin;
insert into t values (3);
connection con1;
--send select * from t where i = 2 for update
connection con2;
let $wait_condition =
`SELECT CONCAT('select count(*) = 1 from information_schema.rocksdb_trx where THREAD_ID = ', '$con1', ' and WAITING_KEY != ""')`;
--source include/wait_condition.inc
--send select * from t where i = 3 for update
connection con3;
let $wait_condition =
`SELECT CONCAT('select count(*) = 1 from information_schema.rocksdb_trx where THREAD_ID = ', '$con2', ' and WAITING_KEY != ""')`;
--source include/wait_condition.inc
select * from t;
--error ER_LOCK_DEADLOCK
insert into t values (4), (1);
--echo # Statement should be rolled back
select * from t;
rollback;
connection con2;
--reap
rollback;
connection con1;
--reap
rollback;
connection default;
disconnect con1;
disconnect con2;
disconnect con3;
set global rocksdb_lock_wait_timeout = @prior_rocksdb_lock_wait_timeout;
set global rocksdb_deadlock_detect = @prior_rocksdb_deadlock_detect;
drop table t,r1,r2;

View File

@@ -0,0 +1 @@
--transaction-isolation=read-committed

View File

@@ -0,0 +1 @@
--source t/rocksdb_deadlock_detect.inc

View File

@@ -0,0 +1 @@
--source t/rocksdb_deadlock_detect.inc

View File

@@ -0,0 +1,18 @@
#
# Stress tests deadlock detection
#
--source include/have_rocksdb.inc
create table t1 (a int primary key, b int) engine=rocksdb;
set @prior_rocksdb_lock_wait_timeout = @@rocksdb_lock_wait_timeout;
set @prior_rocksdb_deadlock_detect = @@rocksdb_deadlock_detect;
set global rocksdb_lock_wait_timeout = 100000;
set global rocksdb_deadlock_detect = ON;
exec python suite/rocksdb/t/rocksdb_deadlock_stress.py root 127.0.0.1 $MASTER_MYPORT test t1 10000 10;
set global rocksdb_lock_wait_timeout = @prior_rocksdb_lock_wait_timeout;
set global rocksdb_deadlock_detect = @prior_rocksdb_deadlock_detect;
drop table t1;

View File

@@ -0,0 +1,94 @@
"""
This script stress tests deadlock detection.
Usage: rocksdb_deadlock_stress.py user host port db_name table_name
num_iters num_threads
"""
import cStringIO
import hashlib
import MySQLdb
from MySQLdb.constants import ER
import os
import random
import signal
import sys
import threading
import time
import string
import traceback
# Return True when the MySQLdb exception carries the server's
# ER_LOCK_DEADLOCK error code (the statement was chosen as a deadlock
# victim), so callers can roll back and retry instead of failing.
def is_deadlock_error(exc):
error_code = exc.args[0]
return (error_code == MySQLdb.constants.ER.LOCK_DEADLOCK)
# Build a randomized SQL statement touching the row with id `idx` of
# `table_name`.  Even ids only ever take a shared lock; odd ids randomly
# pick between a FOR UPDATE read, an upsert, and a delete, so concurrent
# workers can form lock cycles and exercise deadlock detection.
def get_query(table_name, idx):
# Let's assume that even indexes will always be acquirable, to make
# deadlock detection more interesting.
if idx % 2 == 0:
return """SELECT * from %s WHERE a = %d LOCK IN SHARE MODE""" % (table_name, idx)
else:
r = random.randint(1, 3);
if r == 1:
return """SELECT * from %s WHERE a = %d FOR UPDATE""" % (table_name, idx)
elif r == 2:
return """INSERT INTO %s VALUES (%d, 1)
ON DUPLICATE KEY UPDATE b=b+1""" % (table_name, idx)
else:
return """DELETE from %s WHERE a = %d""" % (table_name, idx)
# Worker thread: runs `num_iters` transactions, each locking a random
# sample of 10 rows (ids 0..99) via get_query().  Deadlock errors are
# expected under this stress and trigger a rollback + retry; any other
# exception is recorded so the main thread can fail the run after join().
class Worker(threading.Thread):
def __init__(self, con, table_name, num_iters):
threading.Thread.__init__(self)
self.con = con  # dedicated MySQLdb connection owned by this thread
self.table_name = table_name
self.num_iters = num_iters
self.exception = None  # formatted traceback string on unexpected failure
self.start()  # NOTE: the thread starts running immediately on construction
def run(self):
try:
self.runme()
except Exception, e:
# Stash the traceback; the joining code inspects self.exception.
self.exception = traceback.format_exc()
def runme(self):
cur = self.con.cursor()
for x in xrange(self.num_iters):
try:
for i in random.sample(xrange(100), 10):
cur.execute(get_query(self.table_name, i))
self.con.commit()
except MySQLdb.OperationalError, e:
# Roll back the failed transaction and get a fresh cursor.
self.con.rollback()
cur = self.con.cursor()
if not is_deadlock_error(e):
# Deadlocks are the point of this test; anything else
# is a real failure and is propagated.
raise e
# Script entry point: parse the 7 CLI arguments, spawn `num_threads`
# workers hammering the same table concurrently, then join them and
# exit non-zero if any worker hit an unexpected (non-deadlock) error.
if __name__ == '__main__':
if len(sys.argv) != 8:
print "Usage: rocksdb_deadlock_stress.py user host port db_name " \
"table_name num_iters num_threads"
sys.exit(1)
user = sys.argv[1]
host = sys.argv[2]
port = int(sys.argv[3])
db = sys.argv[4]
table_name = sys.argv[5]
num_iters = int(sys.argv[6])
num_workers = int(sys.argv[7])
worker_failed = False
workers = []
# Each worker gets its own connection; Worker.__init__ starts the thread.
for i in xrange(num_workers):
w = Worker(
MySQLdb.connect(user=user, host=host, port=port, db=db), table_name,
num_iters)
workers.append(w)
# Join all workers and report any recorded failure tracebacks.
for w in workers:
w.join()
if w.exception:
print "Worker hit an exception:\n%s\n" % w.exception
worker_failed = True
if worker_failed:
sys.exit(1)

View File

@@ -0,0 +1 @@
--transaction-isolation=read-committed

View File

@@ -0,0 +1 @@
--source t/rocksdb_deadlock_stress.inc

View File

@@ -0,0 +1 @@
--source t/rocksdb_deadlock_stress.inc

View File

@@ -0,0 +1 @@
--rocksdb_print_snapshot_conflict_queries=1

View File

@@ -16,6 +16,7 @@ select * from t1 where pk=1 for update;
--connect (con1,localhost,root,,) --connect (con1,localhost,root,,)
--connection con1 --connection con1
call mtr.add_suppression("Got snapshot conflict errors");
--echo ### Connection con1 --echo ### Connection con1
let $ID= `select connection_id()`; let $ID= `select connection_id()`;
set @@rocksdb_lock_wait_timeout=500; set @@rocksdb_lock_wait_timeout=500;

View File

@@ -0,0 +1,31 @@
--disable_warnings
DROP TABLE IF EXISTS t1, t2;
--enable_warnings
CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb;
INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'a');
CREATE TABLE t2 LIKE t1;
INSERT INTO t2 SELECT * FROM t1;
--connect (con1,localhost,root,,)
--connect (con2,localhost,root,,)
--connection con1
START TRANSACTION WITH CONSISTENT SNAPSHOT;
SAVEPOINT a;
SELECT * FROM t1 ORDER BY pk;
ROLLBACK TO SAVEPOINT a;
SAVEPOINT a;
SELECT * FROM t2 ORDER BY pk;
ROLLBACK TO SAVEPOINT a;
# should not be blocked
--connection con2
ALTER TABLE t1 RENAME TO t3;
--connection default
DROP TABLE t2, t3;
--disconnect con1
--disconnect con2

View File

@@ -31,10 +31,6 @@ SET lock_wait_timeout = 1;
--sorted_result --sorted_result
SELECT a,b FROM t1 WHERE b='a'; SELECT a,b FROM t1 WHERE b='a';
--sorted_result --sorted_result
--echo #
--echo # Currently, SELECT ... LOCK IN SHARE MODE works like
--echo # SELECT FOR UPDATE
--error ER_LOCK_WAIT_TIMEOUT
SELECT a,b FROM t1 WHERE b='a' LOCK IN SHARE MODE; SELECT a,b FROM t1 WHERE b='a' LOCK IN SHARE MODE;
--error ER_LOCK_WAIT_TIMEOUT --error ER_LOCK_WAIT_TIMEOUT

View File

@@ -71,7 +71,8 @@ SHOW ENGINE rocksdb TRANSACTION STATUS;
START TRANSACTION WITH CONSISTENT SNAPSHOT; START TRANSACTION WITH CONSISTENT SNAPSHOT;
--replace_regex /[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}/TIMESTAMP/ /(ACTIVE) [0-9]+ /\1 NUM / /(thread id) [0-9]+/\1 TID/ /0x[0-9a-f]+/PTR/ #select sleep(10);
--replace_regex /[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}/TIMESTAMP/ /(ACTIVE) [0-9]+ /\1 NUM / /(thread id) [0-9]+/\1 TID/ /0x[0-9a-f]+/PTR/ /(query id) [0-9]+/\1 QID/ /(root) [a-z ]+/\1 ACTION/
SHOW ENGINE rocksdb TRANSACTION STATUS; SHOW ENGINE rocksdb TRANSACTION STATUS;
ROLLBACK; ROLLBACK;

View File

@@ -0,0 +1,35 @@
--source include/have_rocksdb.inc
--echo # If rocksdb_tmpdir is NULL or "", temporary file will be created in
--echo # server configuration variable location(--tmpdir)
CREATE TABLE t1 (a INT PRIMARY KEY, b INT) ENGINE=RocksDB;
show session variables like 'rocksdb_tmpdir';
--echo # Connection con1
connect (con1,localhost,root);
show session variables like 'rocksdb_tmpdir';
ALTER TABLE t1 ADD INDEX kb(b), ALGORITHM=INPLACE;
connection default;
disconnect con1;
DROP TABLE t1;
--echo # rocksdb_tmpdir with valid location.
CREATE TABLE t1 (a INT PRIMARY KEY, b INT) ENGINE=RocksDB;
set @tmpdir = @@global.tmpdir;
set global rocksdb_tmpdir = @tmpdir;
show session variables like 'rocksdb_tmpdir';
--echo # Connection con3
connect (con2,localhost,root);
--replace_result $MYSQL_TMP_DIR MYSQL_TMP_DIR
show session variables like 'rocksdb_tmpdir';
ALTER TABLE t1 ADD INDEX kb(b), ALGORITHM=INPLACE;
disconnect con2;
connection default;
set global rocksdb_tmpdir=NULL;
DROP TABLE t1;

View File

@@ -0,0 +1,17 @@
--source include/have_rocksdb.inc
--disable_warnings
DROP TABLE IF EXISTS t1;
--enable_warnings
create table t1 (a int) engine=rocksdb;
insert into t1 values (1);
insert into t1 values (2);
set autocommit=0;
select * from t1 for update;
--replace_column 1 _TRX_ID_ 3 _NAME_ 7 _KEY_ 14 _THREAD_ID_
select * from information_schema.rocksdb_trx;
DROP TABLE t1;

View File

@@ -0,0 +1,8 @@
!include suite/rpl/my.cnf
[mysqld.1]
binlog_format=row
[mysqld.2]
binlog_format=row
slave_parallel_workers=1
rocksdb_rpl_skip_tx_api=ON

View File

@@ -0,0 +1,42 @@
--source include/master-slave.inc
--disable_warnings
DROP TABLE IF EXISTS t1;
connection slave;
--source include/stop_slave.inc
--enable_warnings
connection master;
create table t1 (a int) engine=rocksdb;
--disable_query_log
--let $aa= 0
while ($aa < 1000) {
eval insert into t1 values ($aa);
--inc $aa
}
--enable_query_log
connection slave;
show variables like 'rocksdb_rpl_skip_tx_api';
--source include/start_slave.inc
--let $it=0
--let $stop=0
while ($stop != 1) {
let $count= query_get_value(select count(*) as Value from information_schema.rocksdb_trx, Value, 1);
if ($count) {
--echo found
--let $stop=1
}
if ($it > 1000) {
--echo not found
--let $stop=1
}
--inc $it
}
connection master;
DROP TABLE t1;
--source include/rpl_end.inc

View File

@@ -1,21 +1,6 @@
--source include/have_rocksdb.inc --source include/have_rocksdb.inc
--source include/have_debug.inc --source include/have_debug.inc
# Test to see if index-only scan fails gracefully if unpack info is not
# available.
set session debug= '+d,MYROCKS_FORMAT_VERSION_INITIAL';
create table t (id int not null auto_increment primary key,
c varchar(1) CHARACTER SET latin1 COLLATE latin1_swedish_ci,
key sk (c));
set session debug= '-d,MYROCKS_FORMAT_VERSION_INITIAL';
insert into t (c) values ('A'), ('b'), ('C');
--replace_column 9 #
explain select c from t;
select c from t;
select c from t where c = 'a';
drop table t;
# Test if unknown collation works. # Test if unknown collation works.
set session debug= "+d,myrocks_enable_unknown_collation_index_only_scans"; set session debug= "+d,myrocks_enable_unknown_collation_index_only_scans";
create table t (id int not null auto_increment primary key, create table t (id int not null auto_increment primary key,
@@ -127,7 +112,7 @@ select c1 from t;
drop table t; drop table t;
# Test varchar keyparts with key prefix # Test varchar keyparts with key prefix
set session rocksdb_verify_checksums = on; set session rocksdb_verify_row_debug_checksums = on;
create table t (id int primary key, email varchar(100), KEY email_i (email(30))) engine=rocksdb default charset=latin1; create table t (id int primary key, email varchar(100), KEY email_i (email(30))) engine=rocksdb default charset=latin1;
insert into t values (1, ' a'); insert into t values (1, ' a');
--replace_column 9 # --replace_column 9 #

View File

@@ -0,0 +1 @@
--rocksdb_debug_optimizer_n_rows=10

View File

@@ -5,81 +5,6 @@
drop table if exists t1, t2; drop table if exists t1, t2;
--enable_warnings --enable_warnings
--echo #
--echo # Check that DECIMAL PK
--echo #
create table t0(a int);
insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
--echo # First, make the server to create a dataset in the old format:
set session debug_dbug= '+d,MYROCKS_FORMAT_VERSION_INITIAL';
create table t1 (
pk1 decimal(32,16),
pk2 decimal(32,16),
pk3 decimal(32,16),
a smallint not null,
primary key(pk1, pk2, pk3)
);
insert into t1
select
A.a, B.a, C.a, 1234
from t0 A, t0 B, t0 C;
--echo #
--echo # Looking at the table size, one can tell that the data is stored using
--echo # old format:
--echo #
set global rocksdb_force_flush_memtable_now=1;
--let $data_length_old = query_get_value("select DATA_LENGTH from information_schema.tables where table_schema=database() and table_name='t1'", DATA_LENGTH, 1)
--echo # Check the format version:
select table_name,index_name,kv_format_version
from information_schema.ROCKSDB_DDL
where TABLE_SCHEMA=database() AND table_name='t1';
flush tables;
set session debug_dbug= '-d,MYROCKS_FORMAT_VERSION_INITIAL';
--source include/restart_mysqld.inc
--echo # Check that the new server reads the data in the old format:
select * from t1 order by pk1,pk2,pk3 limit 5;
--echo #
--echo # Ok, now, enable the new data format:
--echo #
create table t2 (
pk1 decimal(32,16),
pk2 decimal(32,16),
pk3 decimal(32,16),
a smallint not null,
primary key(pk1, pk2, pk3)
);
insert into t2
select
A.a, B.a, C.a, 1234
from t0 A, t0 B, t0 C;
set global rocksdb_force_flush_memtable_now=1;
--let $data_length_new = query_get_value("select DATA_LENGTH from information_schema.tables where table_schema=database() and table_name='t2'", DATA_LENGTH, 1)
--disable_query_log
--eval select $data_length_old > $data_length_new as "larger"
--enable_query_log
--echo # This should show the new PK data fromat
select table_name,index_name,kv_format_version from information_schema.ROCKSDB_DDL
where TABLE_SCHEMA=database() AND table_name='t2';
--echo #
--echo # Check that the server is able to read BOTH the old and the new formats:
--echo #
select * from t2 limit 3;
select * from t1 limit 3;
drop table t1,t2;
drop table t0;
--echo # --echo #
--echo # Check that DECIMAL datatype supports 'index-only' scans and is decoded correctly. --echo # Check that DECIMAL datatype supports 'index-only' scans and is decoded correctly.
--echo # (Decoding happens from the mem-comparable image in the index, regardless --echo # (Decoding happens from the mem-comparable image in the index, regardless

View File

@@ -62,9 +62,9 @@ select 'email_i' as index_name, count(*) AS count from t force index(email_i);
drop table t; drop table t;
set @save_rocksdb_checksums_pct = @@global.rocksdb_checksums_pct; set @save_rocksdb_checksums_pct = @@global.rocksdb_checksums_pct;
set @save_rocksdb_verify_checksums = @@session.rocksdb_verify_checksums; set @save_rocksdb_verify_row_debug_checksums = @@session.rocksdb_verify_row_debug_checksums;
set global rocksdb_checksums_pct = 100; set global rocksdb_checksums_pct = 100;
set session rocksdb_verify_checksums = on; set session rocksdb_verify_row_debug_checksums = on;
create table t (id int primary key, email varchar(100), KEY email_i (email(30))); create table t (id int primary key, email varchar(100), KEY email_i (email(30)));
insert into t values (1, 'a'); insert into t values (1, 'a');
--replace_column 9 # --replace_column 9 #
@@ -72,4 +72,4 @@ explain select 'email_i' as index_name, count(*) AS count from t force index(ema
select 'email_i' as index_name, count(*) AS count from t force index(email_i); select 'email_i' as index_name, count(*) AS count from t force index(email_i);
drop table t; drop table t;
set global rocksdb_checksums_pct = @save_rocksdb_checksums_pct; set global rocksdb_checksums_pct = @save_rocksdb_checksums_pct;
set session rocksdb_verify_checksums = @save_rocksdb_verify_checksums; set session rocksdb_verify_row_debug_checksums = @save_rocksdb_verify_row_debug_checksums;

View File

@@ -1,138 +0,0 @@
#
# VARCHAR encoding tests that require debug support
#
--source include/have_rocksdb.inc
--source include/have_debug.inc
--disable_warnings
drop table if exists t1,t2;
--enable_warnings
set session debug_dbug= "+d,myrocks_enable_unknown_collation_index_only_scans";
--let $character_set_collate=CHARACTER SET utf8 COLLATE utf8_general_ci
--source type_varchar_endspace.inc
set session debug_dbug= "-d,myrocks_enable_unknown_collation_index_only_scans";
--echo #
--echo # Check backwards compatibility:
--echo #
set session debug_dbug= '+d,MYROCKS_FORMAT_VERSION_INITIAL';
--echo # Create the tables in the old format
create table t1 (
pk varchar(64) collate latin1_bin,
col1 varchar(64),
primary key (pk)
);
insert into t1 values ('a','a');
--echo # The following will not produce an error:
insert into t1 values ('a ', 'a-space');
select pk, hex(pk), col1 from t1;
create table t2 (
pk int not null primary key,
col1 varchar(64) collate latin1_bin,
col2 varchar(64),
unique key (col1)
);
insert into t2 values (0, 'ab', 'a-b');
--echo # The following will not produce an error:
insert into t2 values (1, 'a ', 'a-space');
insert into t2 values (2, 'a', 'a');
select pk, col1, hex(col1), col2 from t2;
--echo # Check the format version:
--sorted_result
select table_name,index_name,kv_format_version
from information_schema.ROCKSDB_DDL
where TABLE_SCHEMA=database() AND table_name in ('t1','t2');
flush tables;
set session debug_dbug= '-d,MYROCKS_FORMAT_VERSION_INITIAL';
select pk, hex(pk), col1 from t1;
select pk, col1, hex(col1), col2 from t2;
## Check that we can still read the data when starting on the old datadir:
--source include/restart_mysqld.inc
select pk, hex(pk), col1 from t1;
select pk, col1, hex(col1), col2 from t2;
drop table t1,t2;
--echo #
--echo # General upgrade tests to see that they work.
--echo #
set session debug_dbug= '+d,MYROCKS_FORMAT_VERSION_INITIAL';
create table t2 (
id int primary key,
col1 varchar(64) collate latin1_swedish_ci,
unique key (col1)
) engine=rocksdb;
set session debug_dbug= '-d,MYROCKS_FORMAT_VERSION_INITIAL';
insert into t2 values (1, 'a');
insert into t2 values (2, 'b');
insert into t2 values (3, 'c');
# Check if this is indeed the old format
insert into t2 values (4, 'c ');
select col1 from t2;
delete from t2 where id = 4;
alter table t2 engine=rocksdb;
select col1 from t2;
# Check if this is indeed the new format
--error ER_DUP_ENTRY
insert into t2 values (4, 'c ');
drop table t2;
set session debug_dbug= '+d,MYROCKS_FORMAT_VERSION_INITIAL';
create table t2 (
id int primary key,
col1 varchar(64) collate latin1_bin,
unique key (col1)
) engine=rocksdb;
set session debug_dbug= '-d,MYROCKS_FORMAT_VERSION_INITIAL';
insert into t2 values (1, 'a');
insert into t2 values (2, 'b');
insert into t2 values (3, 'c');
# Check if this is indeed the old format
insert into t2 values (4, 'c ');
select col1 from t2;
delete from t2 where id = 4;
alter table t2 engine=rocksdb;
select col1 from t2;
# Check if this is indeed the new format
--error ER_DUP_ENTRY
insert into t2 values (4, 'c ');
drop table t2;
--echo #
--echo # Check what happens when one tries to 'upgrade' to the new data format
--echo # and causes a unique key violation:
--echo #
set session debug_dbug= '+d,MYROCKS_FORMAT_VERSION_INITIAL';
create table t2 (
pk int not null primary key,
col1 varchar(64) collate latin1_bin,
col2 varchar(64),
unique key (col1)
);
insert into t2 values (1, 'a ', 'a-space');
insert into t2 values (2, 'a', 'a');
select * from t2;
set session debug_dbug= '-d,MYROCKS_FORMAT_VERSION_INITIAL';
--error ER_DUP_ENTRY
alter table t2 engine=rocksdb;
drop table t2;

View File

@@ -0,0 +1,47 @@
--source include/have_rocksdb.inc
# Issue221
# Turning on both --rocksdb-allow-mmap-reads and --rocksdb-use-direct-reads
# caused an assertion in RocksDB. Now it should not be allowed and the
# server will not start with that configuration
# Write file to make mysql-test-run.pl expect the "crash", but don't restart
# the server until it is told to
--let $_server_id= `SELECT @@server_id`
--let $_expect_file_name= $MYSQLTEST_VARDIR/tmp/mysqld.$_server_id.expect
--exec echo "wait" >$_expect_file_name
shutdown_server 10;
# Clear the log
--exec echo "" >$MYSQLTEST_VARDIR/log/mysqld.1.err
# Attempt to restart the server with invalid options
--exec echo "restart:--rocksdb_use_direct_reads=1 --rocksdb_allow_mmap_reads=1" >$_expect_file_name
--sleep 0.1 # Wait 100ms - that is how long the sleep is in check_expected_crash_and_restart
--exec echo "restart:" >$_expect_file_name
# Cleanup
--enable_reconnect
--source include/wait_until_connected_again.inc
--disable_reconnect
# We should now have an error message
--exec grep "enable both use_direct_reads" $MYSQLTEST_VARDIR/log/mysqld.1.err | cut -d] -f2
# Repeat with --rocksdb-use-direct-writes
--let $_server_id= `SELECT @@server_id`
--let $_expect_file_name= $MYSQLTEST_VARDIR/tmp/mysqld.$_server_id.expect
--exec echo "wait" >$_expect_file_name
shutdown_server 10;
--exec echo "" >$MYSQLTEST_VARDIR/log/mysqld.1.err
--exec echo "restart:--rocksdb_use_direct_writes=1 --rocksdb_allow_mmap_writes=1" >$_expect_file_name
--sleep 0.1
--exec echo "restart:" >$_expect_file_name
--enable_reconnect
--source include/wait_until_connected_again.inc
--disable_reconnect
--exec grep "enable both use_direct_writes" $MYSQLTEST_VARDIR/log/mysqld.1.err | cut -d] -f2

View File

@@ -1,7 +0,0 @@
SET @start_global_value = @@global.ROCKSDB_ALLOW_OS_BUFFER;
SELECT @start_global_value;
@start_global_value
1
"Trying to set variable @@global.ROCKSDB_ALLOW_OS_BUFFER to 444. It should fail because it is readonly."
SET @@global.ROCKSDB_ALLOW_OS_BUFFER = 444;
ERROR HY000: Variable 'rocksdb_allow_os_buffer' is a read only variable

View File

@@ -1,7 +1,7 @@
SET @start_global_value = @@global.ROCKSDB_BLOCK_CACHE_SIZE; SET @start_global_value = @@global.ROCKSDB_BLOCK_CACHE_SIZE;
SELECT @start_global_value; SELECT @start_global_value;
@start_global_value @start_global_value
8388608 536870912
"Trying to set variable @@global.ROCKSDB_BLOCK_CACHE_SIZE to 444. It should fail because it is readonly." "Trying to set variable @@global.ROCKSDB_BLOCK_CACHE_SIZE to 444. It should fail because it is readonly."
SET @@global.ROCKSDB_BLOCK_CACHE_SIZE = 444; SET @@global.ROCKSDB_BLOCK_CACHE_SIZE = 444;
ERROR HY000: Variable 'rocksdb_block_cache_size' is a read only variable ERROR HY000: Variable 'rocksdb_block_cache_size' is a read only variable

View File

@@ -0,0 +1,121 @@
CREATE TABLE valid_values (value varchar(255));
INSERT INTO valid_values VALUES(1);
INSERT INTO valid_values VALUES(0);
INSERT INTO valid_values VALUES('on');
INSERT INTO valid_values VALUES('off');
CREATE TABLE invalid_values (value varchar(255));
INSERT INTO invalid_values VALUES('\'aaa\'');
INSERT INTO invalid_values VALUES('\'bbb\'');
SET @start_global_value = @@global.ROCKSDB_DEADLOCK_DETECT;
SELECT @start_global_value;
@start_global_value
0
SET @start_session_value = @@session.ROCKSDB_DEADLOCK_DETECT;
SELECT @start_session_value;
@start_session_value
0
'# Setting to valid values in global scope#'
"Trying to set variable @@global.ROCKSDB_DEADLOCK_DETECT to 1"
SET @@global.ROCKSDB_DEADLOCK_DETECT = 1;
SELECT @@global.ROCKSDB_DEADLOCK_DETECT;
@@global.ROCKSDB_DEADLOCK_DETECT
1
"Setting the global scope variable back to default"
SET @@global.ROCKSDB_DEADLOCK_DETECT = DEFAULT;
SELECT @@global.ROCKSDB_DEADLOCK_DETECT;
@@global.ROCKSDB_DEADLOCK_DETECT
0
"Trying to set variable @@global.ROCKSDB_DEADLOCK_DETECT to 0"
SET @@global.ROCKSDB_DEADLOCK_DETECT = 0;
SELECT @@global.ROCKSDB_DEADLOCK_DETECT;
@@global.ROCKSDB_DEADLOCK_DETECT
0
"Setting the global scope variable back to default"
SET @@global.ROCKSDB_DEADLOCK_DETECT = DEFAULT;
SELECT @@global.ROCKSDB_DEADLOCK_DETECT;
@@global.ROCKSDB_DEADLOCK_DETECT
0
"Trying to set variable @@global.ROCKSDB_DEADLOCK_DETECT to on"
SET @@global.ROCKSDB_DEADLOCK_DETECT = on;
SELECT @@global.ROCKSDB_DEADLOCK_DETECT;
@@global.ROCKSDB_DEADLOCK_DETECT
1
"Setting the global scope variable back to default"
SET @@global.ROCKSDB_DEADLOCK_DETECT = DEFAULT;
SELECT @@global.ROCKSDB_DEADLOCK_DETECT;
@@global.ROCKSDB_DEADLOCK_DETECT
0
"Trying to set variable @@global.ROCKSDB_DEADLOCK_DETECT to off"
SET @@global.ROCKSDB_DEADLOCK_DETECT = off;
SELECT @@global.ROCKSDB_DEADLOCK_DETECT;
@@global.ROCKSDB_DEADLOCK_DETECT
0
"Setting the global scope variable back to default"
SET @@global.ROCKSDB_DEADLOCK_DETECT = DEFAULT;
SELECT @@global.ROCKSDB_DEADLOCK_DETECT;
@@global.ROCKSDB_DEADLOCK_DETECT
0
'# Setting to valid values in session scope#'
"Trying to set variable @@session.ROCKSDB_DEADLOCK_DETECT to 1"
SET @@session.ROCKSDB_DEADLOCK_DETECT = 1;
SELECT @@session.ROCKSDB_DEADLOCK_DETECT;
@@session.ROCKSDB_DEADLOCK_DETECT
1
"Setting the session scope variable back to default"
SET @@session.ROCKSDB_DEADLOCK_DETECT = DEFAULT;
SELECT @@session.ROCKSDB_DEADLOCK_DETECT;
@@session.ROCKSDB_DEADLOCK_DETECT
0
"Trying to set variable @@session.ROCKSDB_DEADLOCK_DETECT to 0"
SET @@session.ROCKSDB_DEADLOCK_DETECT = 0;
SELECT @@session.ROCKSDB_DEADLOCK_DETECT;
@@session.ROCKSDB_DEADLOCK_DETECT
0
"Setting the session scope variable back to default"
SET @@session.ROCKSDB_DEADLOCK_DETECT = DEFAULT;
SELECT @@session.ROCKSDB_DEADLOCK_DETECT;
@@session.ROCKSDB_DEADLOCK_DETECT
0
"Trying to set variable @@session.ROCKSDB_DEADLOCK_DETECT to on"
SET @@session.ROCKSDB_DEADLOCK_DETECT = on;
SELECT @@session.ROCKSDB_DEADLOCK_DETECT;
@@session.ROCKSDB_DEADLOCK_DETECT
1
"Setting the session scope variable back to default"
SET @@session.ROCKSDB_DEADLOCK_DETECT = DEFAULT;
SELECT @@session.ROCKSDB_DEADLOCK_DETECT;
@@session.ROCKSDB_DEADLOCK_DETECT
0
"Trying to set variable @@session.ROCKSDB_DEADLOCK_DETECT to off"
SET @@session.ROCKSDB_DEADLOCK_DETECT = off;
SELECT @@session.ROCKSDB_DEADLOCK_DETECT;
@@session.ROCKSDB_DEADLOCK_DETECT
0
"Setting the session scope variable back to default"
SET @@session.ROCKSDB_DEADLOCK_DETECT = DEFAULT;
SELECT @@session.ROCKSDB_DEADLOCK_DETECT;
@@session.ROCKSDB_DEADLOCK_DETECT
0
'# Testing with invalid values in global scope #'
"Trying to set variable @@global.ROCKSDB_DEADLOCK_DETECT to 'aaa'"
SET @@global.ROCKSDB_DEADLOCK_DETECT = 'aaa';
Got one of the listed errors
SELECT @@global.ROCKSDB_DEADLOCK_DETECT;
@@global.ROCKSDB_DEADLOCK_DETECT
0
"Trying to set variable @@global.ROCKSDB_DEADLOCK_DETECT to 'bbb'"
SET @@global.ROCKSDB_DEADLOCK_DETECT = 'bbb';
Got one of the listed errors
SELECT @@global.ROCKSDB_DEADLOCK_DETECT;
@@global.ROCKSDB_DEADLOCK_DETECT
0
SET @@global.ROCKSDB_DEADLOCK_DETECT = @start_global_value;
SELECT @@global.ROCKSDB_DEADLOCK_DETECT;
@@global.ROCKSDB_DEADLOCK_DETECT
0
SET @@session.ROCKSDB_DEADLOCK_DETECT = @start_session_value;
SELECT @@session.ROCKSDB_DEADLOCK_DETECT;
@@session.ROCKSDB_DEADLOCK_DETECT
0
DROP TABLE valid_values;
DROP TABLE invalid_values;

View File

@@ -0,0 +1,64 @@
CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam;
INSERT INTO valid_values VALUES(1);
INSERT INTO valid_values VALUES(0);
INSERT INTO valid_values VALUES('on');
CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
INSERT INTO invalid_values VALUES('\'aaa\'');
INSERT INTO invalid_values VALUES('\'bbb\'');
SET @start_global_value = @@global.ROCKSDB_PRINT_SNAPSHOT_CONFLICT_QUERIES;
SELECT @start_global_value;
@start_global_value
0
'# Setting to valid values in global scope#'
"Trying to set variable @@global.ROCKSDB_PRINT_SNAPSHOT_CONFLICT_QUERIES to 1"
SET @@global.ROCKSDB_PRINT_SNAPSHOT_CONFLICT_QUERIES = 1;
SELECT @@global.ROCKSDB_PRINT_SNAPSHOT_CONFLICT_QUERIES;
@@global.ROCKSDB_PRINT_SNAPSHOT_CONFLICT_QUERIES
1
"Setting the global scope variable back to default"
SET @@global.ROCKSDB_PRINT_SNAPSHOT_CONFLICT_QUERIES = DEFAULT;
SELECT @@global.ROCKSDB_PRINT_SNAPSHOT_CONFLICT_QUERIES;
@@global.ROCKSDB_PRINT_SNAPSHOT_CONFLICT_QUERIES
0
"Trying to set variable @@global.ROCKSDB_PRINT_SNAPSHOT_CONFLICT_QUERIES to 0"
SET @@global.ROCKSDB_PRINT_SNAPSHOT_CONFLICT_QUERIES = 0;
SELECT @@global.ROCKSDB_PRINT_SNAPSHOT_CONFLICT_QUERIES;
@@global.ROCKSDB_PRINT_SNAPSHOT_CONFLICT_QUERIES
0
"Setting the global scope variable back to default"
SET @@global.ROCKSDB_PRINT_SNAPSHOT_CONFLICT_QUERIES = DEFAULT;
SELECT @@global.ROCKSDB_PRINT_SNAPSHOT_CONFLICT_QUERIES;
@@global.ROCKSDB_PRINT_SNAPSHOT_CONFLICT_QUERIES
0
"Trying to set variable @@global.ROCKSDB_PRINT_SNAPSHOT_CONFLICT_QUERIES to on"
SET @@global.ROCKSDB_PRINT_SNAPSHOT_CONFLICT_QUERIES = on;
SELECT @@global.ROCKSDB_PRINT_SNAPSHOT_CONFLICT_QUERIES;
@@global.ROCKSDB_PRINT_SNAPSHOT_CONFLICT_QUERIES
1
"Setting the global scope variable back to default"
SET @@global.ROCKSDB_PRINT_SNAPSHOT_CONFLICT_QUERIES = DEFAULT;
SELECT @@global.ROCKSDB_PRINT_SNAPSHOT_CONFLICT_QUERIES;
@@global.ROCKSDB_PRINT_SNAPSHOT_CONFLICT_QUERIES
0
"Trying to set variable @@session.ROCKSDB_PRINT_SNAPSHOT_CONFLICT_QUERIES to 444. It should fail because it is not session."
SET @@session.ROCKSDB_PRINT_SNAPSHOT_CONFLICT_QUERIES = 444;
ERROR HY000: Variable 'rocksdb_print_snapshot_conflict_queries' is a GLOBAL variable and should be set with SET GLOBAL
'# Testing with invalid values in global scope #'
"Trying to set variable @@global.ROCKSDB_PRINT_SNAPSHOT_CONFLICT_QUERIES to 'aaa'"
SET @@global.ROCKSDB_PRINT_SNAPSHOT_CONFLICT_QUERIES = 'aaa';
Got one of the listed errors
SELECT @@global.ROCKSDB_PRINT_SNAPSHOT_CONFLICT_QUERIES;
@@global.ROCKSDB_PRINT_SNAPSHOT_CONFLICT_QUERIES
0
"Trying to set variable @@global.ROCKSDB_PRINT_SNAPSHOT_CONFLICT_QUERIES to 'bbb'"
SET @@global.ROCKSDB_PRINT_SNAPSHOT_CONFLICT_QUERIES = 'bbb';
Got one of the listed errors
SELECT @@global.ROCKSDB_PRINT_SNAPSHOT_CONFLICT_QUERIES;
@@global.ROCKSDB_PRINT_SNAPSHOT_CONFLICT_QUERIES
0
SET @@global.ROCKSDB_PRINT_SNAPSHOT_CONFLICT_QUERIES = @start_global_value;
SELECT @@global.ROCKSDB_PRINT_SNAPSHOT_CONFLICT_QUERIES;
@@global.ROCKSDB_PRINT_SNAPSHOT_CONFLICT_QUERIES
0
DROP TABLE valid_values;
DROP TABLE invalid_values;

View File

@@ -1,100 +0,0 @@
CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam;
INSERT INTO valid_values VALUES(1);
INSERT INTO valid_values VALUES(0);
INSERT INTO valid_values VALUES('on');
CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
INSERT INTO invalid_values VALUES('\'aaa\'');
INSERT INTO invalid_values VALUES('\'bbb\'');
SET @start_global_value = @@global.ROCKSDB_STORE_CHECKSUMS;
SELECT @start_global_value;
@start_global_value
0
SET @start_session_value = @@session.ROCKSDB_STORE_CHECKSUMS;
SELECT @start_session_value;
@start_session_value
0
'# Setting to valid values in global scope#'
"Trying to set variable @@global.ROCKSDB_STORE_CHECKSUMS to 1"
SET @@global.ROCKSDB_STORE_CHECKSUMS = 1;
SELECT @@global.ROCKSDB_STORE_CHECKSUMS;
@@global.ROCKSDB_STORE_CHECKSUMS
1
"Setting the global scope variable back to default"
SET @@global.ROCKSDB_STORE_CHECKSUMS = DEFAULT;
SELECT @@global.ROCKSDB_STORE_CHECKSUMS;
@@global.ROCKSDB_STORE_CHECKSUMS
0
"Trying to set variable @@global.ROCKSDB_STORE_CHECKSUMS to 0"
SET @@global.ROCKSDB_STORE_CHECKSUMS = 0;
SELECT @@global.ROCKSDB_STORE_CHECKSUMS;
@@global.ROCKSDB_STORE_CHECKSUMS
0
"Setting the global scope variable back to default"
SET @@global.ROCKSDB_STORE_CHECKSUMS = DEFAULT;
SELECT @@global.ROCKSDB_STORE_CHECKSUMS;
@@global.ROCKSDB_STORE_CHECKSUMS
0
"Trying to set variable @@global.ROCKSDB_STORE_CHECKSUMS to on"
SET @@global.ROCKSDB_STORE_CHECKSUMS = on;
SELECT @@global.ROCKSDB_STORE_CHECKSUMS;
@@global.ROCKSDB_STORE_CHECKSUMS
1
"Setting the global scope variable back to default"
SET @@global.ROCKSDB_STORE_CHECKSUMS = DEFAULT;
SELECT @@global.ROCKSDB_STORE_CHECKSUMS;
@@global.ROCKSDB_STORE_CHECKSUMS
0
'# Setting to valid values in session scope#'
"Trying to set variable @@session.ROCKSDB_STORE_CHECKSUMS to 1"
SET @@session.ROCKSDB_STORE_CHECKSUMS = 1;
SELECT @@session.ROCKSDB_STORE_CHECKSUMS;
@@session.ROCKSDB_STORE_CHECKSUMS
1
"Setting the session scope variable back to default"
SET @@session.ROCKSDB_STORE_CHECKSUMS = DEFAULT;
SELECT @@session.ROCKSDB_STORE_CHECKSUMS;
@@session.ROCKSDB_STORE_CHECKSUMS
0
"Trying to set variable @@session.ROCKSDB_STORE_CHECKSUMS to 0"
SET @@session.ROCKSDB_STORE_CHECKSUMS = 0;
SELECT @@session.ROCKSDB_STORE_CHECKSUMS;
@@session.ROCKSDB_STORE_CHECKSUMS
0
"Setting the session scope variable back to default"
SET @@session.ROCKSDB_STORE_CHECKSUMS = DEFAULT;
SELECT @@session.ROCKSDB_STORE_CHECKSUMS;
@@session.ROCKSDB_STORE_CHECKSUMS
0
"Trying to set variable @@session.ROCKSDB_STORE_CHECKSUMS to on"
SET @@session.ROCKSDB_STORE_CHECKSUMS = on;
SELECT @@session.ROCKSDB_STORE_CHECKSUMS;
@@session.ROCKSDB_STORE_CHECKSUMS
1
"Setting the session scope variable back to default"
SET @@session.ROCKSDB_STORE_CHECKSUMS = DEFAULT;
SELECT @@session.ROCKSDB_STORE_CHECKSUMS;
@@session.ROCKSDB_STORE_CHECKSUMS
0
'# Testing with invalid values in global scope #'
"Trying to set variable @@global.ROCKSDB_STORE_CHECKSUMS to 'aaa'"
SET @@global.ROCKSDB_STORE_CHECKSUMS = 'aaa';
Got one of the listed errors
SELECT @@global.ROCKSDB_STORE_CHECKSUMS;
@@global.ROCKSDB_STORE_CHECKSUMS
0
"Trying to set variable @@global.ROCKSDB_STORE_CHECKSUMS to 'bbb'"
SET @@global.ROCKSDB_STORE_CHECKSUMS = 'bbb';
Got one of the listed errors
SELECT @@global.ROCKSDB_STORE_CHECKSUMS;
@@global.ROCKSDB_STORE_CHECKSUMS
0
SET @@global.ROCKSDB_STORE_CHECKSUMS = @start_global_value;
SELECT @@global.ROCKSDB_STORE_CHECKSUMS;
@@global.ROCKSDB_STORE_CHECKSUMS
0
SET @@session.ROCKSDB_STORE_CHECKSUMS = @start_session_value;
SELECT @@session.ROCKSDB_STORE_CHECKSUMS;
@@session.ROCKSDB_STORE_CHECKSUMS
0
DROP TABLE valid_values;
DROP TABLE invalid_values;

View File

@@ -0,0 +1,100 @@
CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam;
INSERT INTO valid_values VALUES(1);
INSERT INTO valid_values VALUES(0);
INSERT INTO valid_values VALUES('on');
CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
INSERT INTO invalid_values VALUES('\'aaa\'');
INSERT INTO invalid_values VALUES('\'bbb\'');
SET @start_global_value = @@global.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS;
SELECT @start_global_value;
@start_global_value
0
SET @start_session_value = @@session.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS;
SELECT @start_session_value;
@start_session_value
0
'# Setting to valid values in global scope#'
"Trying to set variable @@global.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS to 1"
SET @@global.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS = 1;
SELECT @@global.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS;
@@global.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS
1
"Setting the global scope variable back to default"
SET @@global.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS = DEFAULT;
SELECT @@global.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS;
@@global.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS
0
"Trying to set variable @@global.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS to 0"
SET @@global.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS = 0;
SELECT @@global.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS;
@@global.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS
0
"Setting the global scope variable back to default"
SET @@global.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS = DEFAULT;
SELECT @@global.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS;
@@global.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS
0
"Trying to set variable @@global.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS to on"
SET @@global.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS = on;
SELECT @@global.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS;
@@global.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS
1
"Setting the global scope variable back to default"
SET @@global.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS = DEFAULT;
SELECT @@global.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS;
@@global.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS
0
'# Setting to valid values in session scope#'
"Trying to set variable @@session.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS to 1"
SET @@session.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS = 1;
SELECT @@session.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS;
@@session.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS
1
"Setting the session scope variable back to default"
SET @@session.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS = DEFAULT;
SELECT @@session.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS;
@@session.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS
0
"Trying to set variable @@session.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS to 0"
SET @@session.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS = 0;
SELECT @@session.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS;
@@session.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS
0
"Setting the session scope variable back to default"
SET @@session.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS = DEFAULT;
SELECT @@session.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS;
@@session.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS
0
"Trying to set variable @@session.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS to on"
SET @@session.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS = on;
SELECT @@session.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS;
@@session.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS
1
"Setting the session scope variable back to default"
SET @@session.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS = DEFAULT;
SELECT @@session.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS;
@@session.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS
0
'# Testing with invalid values in global scope #'
"Trying to set variable @@global.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS to 'aaa'"
SET @@global.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS = 'aaa';
Got one of the listed errors
SELECT @@global.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS;
@@global.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS
0
"Trying to set variable @@global.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS to 'bbb'"
SET @@global.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS = 'bbb';
Got one of the listed errors
SELECT @@global.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS;
@@global.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS
0
SET @@global.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS = @start_global_value;
SELECT @@global.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS;
@@global.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS
0
SET @@session.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS = @start_session_value;
SELECT @@session.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS;
@@session.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS
0
DROP TABLE valid_values;
DROP TABLE invalid_values;

View File

@@ -0,0 +1,29 @@
SET @start_global_value = @@global.rocksdb_tmpdir;
SELECT @start_global_value;
@start_global_value
select @@session.rocksdb_tmpdir;
@@session.rocksdb_tmpdir
show global variables like 'rocksdb_tmpdir';
Variable_name Value
rocksdb_tmpdir
show session variables like 'rocksdb_tmpdir';
Variable_name Value
rocksdb_tmpdir
select * from information_schema.global_variables where variable_name='rocksdb_tmpdir';
VARIABLE_NAME VARIABLE_VALUE
ROCKSDB_TMPDIR
select * from information_schema.session_variables where variable_name='rocksdb_tmpdir';
VARIABLE_NAME VARIABLE_VALUE
ROCKSDB_TMPDIR
set global rocksdb_tmpdir='value';
set session rocksdb_tmpdir='value';
set global rocksdb_tmpdir=1.1;
ERROR 42000: Incorrect argument type to variable 'rocksdb_tmpdir'
set global rocksdb_tmpdir=1e1;
ERROR 42000: Incorrect argument type to variable 'rocksdb_tmpdir'
SET @@global.rocksdb_tmpdir = @start_global_value;
SELECT @@global.rocksdb_tmpdir;
@@global.rocksdb_tmpdir

View File

@@ -0,0 +1,100 @@
CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam;
INSERT INTO valid_values VALUES(1);
INSERT INTO valid_values VALUES(0);
INSERT INTO valid_values VALUES('on');
CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
INSERT INTO invalid_values VALUES('\'aaa\'');
INSERT INTO invalid_values VALUES('\'bbb\'');
SET @start_global_value = @@global.ROCKSDB_TRACE_SST_API;
SELECT @start_global_value;
@start_global_value
0
SET @start_session_value = @@session.ROCKSDB_TRACE_SST_API;
SELECT @start_session_value;
@start_session_value
0
'# Setting to valid values in global scope#'
"Trying to set variable @@global.ROCKSDB_TRACE_SST_API to 1"
SET @@global.ROCKSDB_TRACE_SST_API = 1;
SELECT @@global.ROCKSDB_TRACE_SST_API;
@@global.ROCKSDB_TRACE_SST_API
1
"Setting the global scope variable back to default"
SET @@global.ROCKSDB_TRACE_SST_API = DEFAULT;
SELECT @@global.ROCKSDB_TRACE_SST_API;
@@global.ROCKSDB_TRACE_SST_API
0
"Trying to set variable @@global.ROCKSDB_TRACE_SST_API to 0"
SET @@global.ROCKSDB_TRACE_SST_API = 0;
SELECT @@global.ROCKSDB_TRACE_SST_API;
@@global.ROCKSDB_TRACE_SST_API
0
"Setting the global scope variable back to default"
SET @@global.ROCKSDB_TRACE_SST_API = DEFAULT;
SELECT @@global.ROCKSDB_TRACE_SST_API;
@@global.ROCKSDB_TRACE_SST_API
0
"Trying to set variable @@global.ROCKSDB_TRACE_SST_API to on"
SET @@global.ROCKSDB_TRACE_SST_API = on;
SELECT @@global.ROCKSDB_TRACE_SST_API;
@@global.ROCKSDB_TRACE_SST_API
1
"Setting the global scope variable back to default"
SET @@global.ROCKSDB_TRACE_SST_API = DEFAULT;
SELECT @@global.ROCKSDB_TRACE_SST_API;
@@global.ROCKSDB_TRACE_SST_API
0
'# Setting to valid values in session scope#'
"Trying to set variable @@session.ROCKSDB_TRACE_SST_API to 1"
SET @@session.ROCKSDB_TRACE_SST_API = 1;
SELECT @@session.ROCKSDB_TRACE_SST_API;
@@session.ROCKSDB_TRACE_SST_API
1
"Setting the session scope variable back to default"
SET @@session.ROCKSDB_TRACE_SST_API = DEFAULT;
SELECT @@session.ROCKSDB_TRACE_SST_API;
@@session.ROCKSDB_TRACE_SST_API
0
"Trying to set variable @@session.ROCKSDB_TRACE_SST_API to 0"
SET @@session.ROCKSDB_TRACE_SST_API = 0;
SELECT @@session.ROCKSDB_TRACE_SST_API;
@@session.ROCKSDB_TRACE_SST_API
0
"Setting the session scope variable back to default"
SET @@session.ROCKSDB_TRACE_SST_API = DEFAULT;
SELECT @@session.ROCKSDB_TRACE_SST_API;
@@session.ROCKSDB_TRACE_SST_API
0
"Trying to set variable @@session.ROCKSDB_TRACE_SST_API to on"
SET @@session.ROCKSDB_TRACE_SST_API = on;
SELECT @@session.ROCKSDB_TRACE_SST_API;
@@session.ROCKSDB_TRACE_SST_API
1
"Setting the session scope variable back to default"
SET @@session.ROCKSDB_TRACE_SST_API = DEFAULT;
SELECT @@session.ROCKSDB_TRACE_SST_API;
@@session.ROCKSDB_TRACE_SST_API
0
'# Testing with invalid values in global scope #'
"Trying to set variable @@global.ROCKSDB_TRACE_SST_API to 'aaa'"
SET @@global.ROCKSDB_TRACE_SST_API = 'aaa';
Got one of the listed errors
SELECT @@global.ROCKSDB_TRACE_SST_API;
@@global.ROCKSDB_TRACE_SST_API
0
"Trying to set variable @@global.ROCKSDB_TRACE_SST_API to 'bbb'"
SET @@global.ROCKSDB_TRACE_SST_API = 'bbb';
Got one of the listed errors
SELECT @@global.ROCKSDB_TRACE_SST_API;
@@global.ROCKSDB_TRACE_SST_API
0
SET @@global.ROCKSDB_TRACE_SST_API = @start_global_value;
SELECT @@global.ROCKSDB_TRACE_SST_API;
@@global.ROCKSDB_TRACE_SST_API
0
SET @@session.ROCKSDB_TRACE_SST_API = @start_session_value;
SELECT @@session.ROCKSDB_TRACE_SST_API;
@@session.ROCKSDB_TRACE_SST_API
0
DROP TABLE valid_values;
DROP TABLE invalid_values;

View File

@@ -0,0 +1,7 @@
SET @start_global_value = @@global.ROCKSDB_USE_DIRECT_READS;
SELECT @start_global_value;
@start_global_value
0
"Trying to set variable @@global.ROCKSDB_USE_DIRECT_READS to 444. It should fail because it is readonly."
SET @@global.ROCKSDB_USE_DIRECT_READS = 444;
ERROR HY000: Variable 'rocksdb_use_direct_reads' is a read only variable

View File

@@ -0,0 +1,7 @@
SET @start_global_value = @@global.ROCKSDB_USE_DIRECT_WRITES;
SELECT @start_global_value;
@start_global_value
0
"Trying to set variable @@global.ROCKSDB_USE_DIRECT_WRITES to 444. It should fail because it is readonly."
SET @@global.ROCKSDB_USE_DIRECT_WRITES = 444;
ERROR HY000: Variable 'rocksdb_use_direct_writes' is a read only variable

Some files were not shown because too many files have changed in this diff Show More