commit ba00e640f658ad8d0a4dff09a497a51b8a4de935
Author: Herman Lee <herman@fb.com>
Date:   Wed Feb 22 06:30:06 2017 -0800

Improve add_index_alter_cardinality test

Summary: Split add_index_inplace_cardinality test out and add a debug_sync
point to it so that the flush of the memtable occurs while the alter is
running.

Closes https://github.com/facebook/mysql-5.6/pull/539

Reviewed By: alxyang

Differential Revision: D4597887

Pulled By: hermanlee

fbshipit-source-id: faedda2
storage/rocksdb/.clang-format (new file, 93 lines)
@@ -0,0 +1,93 @@
---
Language: Cpp
# BasedOnStyle: LLVM
AccessModifierOffset: -2
AlignAfterOpenBracket: Align
AlignConsecutiveAssignments: false
AlignConsecutiveDeclarations: false
AlignEscapedNewlinesLeft: false
AlignOperands: true
AlignTrailingComments: true
AllowAllParametersOfDeclarationOnNextLine: true
AllowShortBlocksOnASingleLine: false
AllowShortCaseLabelsOnASingleLine: false
AllowShortFunctionsOnASingleLine: All
AllowShortIfStatementsOnASingleLine: false
AllowShortLoopsOnASingleLine: false
AlwaysBreakAfterDefinitionReturnType: None
AlwaysBreakAfterReturnType: None
AlwaysBreakBeforeMultilineStrings: false
AlwaysBreakTemplateDeclarations: false
BinPackArguments: true
BinPackParameters: true
BraceWrapping:
  AfterClass: false
  AfterControlStatement: false
  AfterEnum: false
  AfterFunction: false
  AfterNamespace: false
  AfterObjCDeclaration: false
  AfterStruct: false
  AfterUnion: false
  BeforeCatch: false
  BeforeElse: false
  IndentBraces: false
BreakBeforeBinaryOperators: None
BreakBeforeBraces: Attach
BreakBeforeTernaryOperators: true
BreakConstructorInitializersBeforeComma: false
BreakAfterJavaFieldAnnotations: false
BreakStringLiterals: true
ColumnLimit: 80
CommentPragmas: '^ IWYU pragma:'
ConstructorInitializerAllOnOneLineOrOnePerLine: false
ConstructorInitializerIndentWidth: 4
ContinuationIndentWidth: 4
Cpp11BracedListStyle: true
DerivePointerAlignment: false
DisableFormat: false
ExperimentalAutoDetectBinPacking: false
ForEachMacros: [ foreach, Q_FOREACH, BOOST_FOREACH ]
IncludeCategories:
  - Regex: '^"(llvm|llvm-c|clang|clang-c)/'
    Priority: 2
  - Regex: '^(<|"(gtest|isl|json)/)'
    Priority: 3
  - Regex: '.*'
    Priority: 1
IncludeIsMainRegex: '$'
IndentCaseLabels: false
IndentWidth: 2
IndentWrappedFunctionNames: false
KeepEmptyLinesAtTheStartOfBlocks: true
MacroBlockBegin: ''
MacroBlockEnd: ''
MaxEmptyLinesToKeep: 1
NamespaceIndentation: None
ObjCBlockIndentWidth: 2
ObjCSpaceAfterProperty: false
ObjCSpaceBeforeProtocolList: true
PenaltyBreakBeforeFirstCallParameter: 19
PenaltyBreakComment: 300
PenaltyBreakFirstLessLess: 120
PenaltyBreakString: 1000
PenaltyExcessCharacter: 1000000
PenaltyReturnTypeOnItsOwnLine: 60
PointerAlignment: Right
ReflowComments: true
SortIncludes: true
SpaceAfterCStyleCast: false
SpaceBeforeAssignmentOperators: true
SpaceBeforeParens: ControlStatements
SpaceInEmptyParentheses: false
SpacesBeforeTrailingComments: 1
SpacesInAngles: false
SpacesInContainerLiterals: true
SpacesInCStyleCastParentheses: false
SpacesInParentheses: false
SpacesInSquareBrackets: false
Standard: Cpp11
TabWidth: 8
UseTab: Never
JavaScriptQuotes: Leave
...
@@ -28,6 +28,11 @@ INCLUDE_DIRECTORIES(
ADD_DEFINITIONS(-DROCKSDB_PLATFORM_POSIX -DROCKSDB_LIB_IO_POSIX -DOS_LINUX
-DZLIB)

CHECK_FUNCTION_EXISTS(fallocate HAVE_FALLOCATE)
IF(HAVE_FALLOCATE)
ADD_DEFINITIONS(-DROCKSDB_FALLOCATE_PRESENT)
ENDIF()

SET(ROCKSDB_SOURCES
ha_rocksdb.cc ha_rocksdb.h ha_rocksdb_proto.h
logger.h
@@ -41,6 +46,7 @@ SET(ROCKSDB_SOURCES
rdb_index_merge.cc rdb_index_merge.h
rdb_perf_context.cc rdb_perf_context.h
rdb_mutex_wrapper.cc rdb_mutex_wrapper.h
rdb_psi.h rdb_psi.cc
rdb_sst_info.cc rdb_sst_info.h
rdb_utils.cc rdb_utils.h rdb_buff.h
rdb_threads.cc rdb_threads.h
File diff suppressed because it is too large
@@ -121,6 +121,34 @@ const char *const BG_THREAD_NAME = "myrocks-bg";
*/
const char *const INDEX_THREAD_NAME = "myrocks-index";

/*
Separator between partition name and the qualifier. Sample usage:

- p0_cfname=foo
- p3_tts_col=bar
*/
const char RDB_PER_PARTITION_QUALIFIER_NAME_SEP = '_';

/*
Separator between qualifier name and value. Sample usage:

- p0_cfname=foo
- p3_tts_col=bar
*/
const char RDB_PER_PARTITION_QUALIFIER_VALUE_SEP = '=';

/*
Separator between multiple qualifier assignments. Sample usage:

- p0_cfname=foo;p1_cfname=bar;p2_cfname=baz
*/
const char RDB_QUALIFIER_SEP = ';';

/*
Qualifier name for a custom per partition column family.
*/
const char *const RDB_CF_NAME_QUALIFIER = "cfname";

/*
Default, minimal valid, and maximum valid sampling rate values when collecting
statistics about table.
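The constants above define a small key/value syntax for per-partition settings embedded in an index comment, e.g. 'custom_p0_cfname=foo;custom_p1_cfname=my_custom_cf', as exercised by the partition_cf result file at the end of this page. The standalone C++ sketch below (not part of the diff) shows how such a comment string could be split with these separators; the helper name is hypothetical and this is not the MyRocks parse_into_tokens()/generate_cf_name() code.

#include <iostream>
#include <map>
#include <sstream>
#include <string>

// Illustrative only: split "p0_cfname=foo;p1_cfname=bar" into {p0_cfname: foo, ...}
// using the same separators as RDB_QUALIFIER_SEP (';') and
// RDB_PER_PARTITION_QUALIFIER_VALUE_SEP ('=').
static std::map<std::string, std::string> parse_qualifiers(const std::string &comment) {
  std::map<std::string, std::string> result;
  std::istringstream input(comment);
  std::string assignment;
  while (std::getline(input, assignment, ';')) {
    const auto eq = assignment.find('=');
    if (eq == std::string::npos) continue;  // not a qualifier assignment
    result[assignment.substr(0, eq)] = assignment.substr(eq + 1);
  }
  return result;
}

int main() {
  for (const auto &kv : parse_qualifiers("custom_p0_cfname=foo;custom_p1_cfname=my_custom_cf"))
    std::cout << kv.first << " -> " << kv.second << "\n";
  return 0;
}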
@@ -192,7 +220,9 @@ const char *const INDEX_THREAD_NAME = "myrocks-index";
#define HA_ERR_ROCKSDB_UNIQUE_NOT_SUPPORTED (HA_ERR_LAST + 1)
#define HA_ERR_ROCKSDB_PK_REQUIRED (HA_ERR_LAST + 2)
#define HA_ERR_ROCKSDB_TOO_MANY_LOCKS (HA_ERR_LAST + 3)
#define HA_ERR_ROCKSDB_LAST HA_ERR_ROCKSDB_TOO_MANY_LOCKS
#define HA_ERR_ROCKSDB_TABLE_DATA_DIRECTORY_NOT_SUPPORTED (HA_ERR_LAST + 4)
#define HA_ERR_ROCKSDB_TABLE_INDEX_DIRECTORY_NOT_SUPPORTED (HA_ERR_LAST + 5)
#define HA_ERR_ROCKSDB_LAST HA_ERR_ROCKSDB_TABLE_INDEX_DIRECTORY_NOT_SUPPORTED

inline bool looks_like_per_index_cf_typo(const char *const name) {
return (name && name[0] == '$' && strcmp(name, PER_INDEX_CF_NAME));
@@ -258,6 +288,7 @@ enum operation_type {
ROWS_INSERTED,
ROWS_READ,
ROWS_UPDATED,
ROWS_DELETED_BLIND,
ROWS_MAX
};
@@ -282,6 +313,7 @@ struct st_export_stats {
ulonglong rows_inserted;
ulonglong rows_read;
ulonglong rows_updated;
ulonglong rows_deleted_blind;

ulonglong system_rows_deleted;
ulonglong system_rows_inserted;
@@ -474,13 +506,13 @@ class ha_rocksdb : public my_core::handler {
const rocksdb::Slice &key, std::string *const value) const;

int get_row_by_rowid(uchar *const buf, const char *const rowid,
const uint rowid_size)
const uint rowid_size, const bool skip_lookup = false)
MY_ATTRIBUTE((__nonnull__, __warn_unused_result__));
int get_row_by_rowid(uchar *const buf, const uchar *const rowid,
const uint rowid_size)
const uint rowid_size, const bool skip_lookup = false)
MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)) {
return get_row_by_rowid(buf, reinterpret_cast<const char *>(rowid),
rowid_size);
rowid_size, skip_lookup);
}

void update_auto_incr_val();
@@ -491,6 +523,7 @@ class ha_rocksdb : public my_core::handler {
MY_ATTRIBUTE((__nonnull__, __warn_unused_result__));
bool can_use_single_delete(const uint &index) const
MY_ATTRIBUTE((__warn_unused_result__));
bool is_blind_delete_enabled();
bool skip_unique_check() const MY_ATTRIBUTE((__warn_unused_result__));
void set_force_skip_unique_check(bool skip) override;
bool commit_in_the_middle() MY_ATTRIBUTE((__warn_unused_result__));
@@ -667,6 +700,21 @@ public:
int rename_table(const char *const from, const char *const to) override
MY_ATTRIBUTE((__nonnull__, __warn_unused_result__));

int convert_blob_from_storage_format(my_core::Field_blob *const blob,
Rdb_string_reader *const reader,
bool decode)
MY_ATTRIBUTE((__nonnull__, __warn_unused_result__));

int convert_varchar_from_storage_format(
my_core::Field_varstring *const field_var,
Rdb_string_reader *const reader, bool decode)
MY_ATTRIBUTE((__nonnull__, __warn_unused_result__));

int convert_field_from_storage_format(my_core::Field *const field,
Rdb_string_reader *const reader,
bool decode, uint len)
MY_ATTRIBUTE((__nonnull__, __warn_unused_result__));

int convert_record_from_storage_format(const rocksdb::Slice *const key,
const rocksdb::Slice *const value,
uchar *const buf)
@@ -681,6 +729,17 @@ public:
rocksdb::Slice *const packed_rec)
MY_ATTRIBUTE((__nonnull__));

static const std::string gen_cf_name_qualifier_for_partition(
const std::string &s);

static const std::vector<std::string> parse_into_tokens(const std::string &s,
const char delim);

static const std::string generate_cf_name(const uint index,
const TABLE *const table_arg,
const Rdb_tbl_def *const tbl_def_arg,
bool *per_part_match_found);

static const char *get_key_name(const uint index,
const TABLE *const table_arg,
const Rdb_tbl_def *const tbl_def_arg)
@@ -702,7 +761,6 @@ public:
static bool is_pk(const uint index, const TABLE *table_arg,
const Rdb_tbl_def *tbl_def_arg)
MY_ATTRIBUTE((__nonnull__, __warn_unused_result__));

/** @brief
unireg.cc will call max_supported_record_length(), max_supported_keys(),
max_supported_key_parts(), uint max_supported_key_length()
@@ -827,6 +885,7 @@ private:
rocksdb::ColumnFamilyHandle *cf_handle;
bool is_reverse_cf;
bool is_auto_cf;
bool is_per_partition_cf;
};

struct update_row_info {
@@ -946,10 +1005,8 @@ private:
int read_before_key(const Rdb_key_def &kd, const bool &using_full_key,
const rocksdb::Slice &key_slice)
MY_ATTRIBUTE((__nonnull__, __warn_unused_result__));
int read_after_key(const Rdb_key_def &kd, const bool &using_full_key,
const rocksdb::Slice &key_slice)
int read_after_key(const Rdb_key_def &kd, const rocksdb::Slice &key_slice)
MY_ATTRIBUTE((__nonnull__, __warn_unused_result__));

int position_to_correct_key(
const Rdb_key_def &kd, const enum ha_rkey_function &find_flag,
const bool &full_key_match, const uchar *const key,
@@ -23,6 +23,10 @@ namespace myrocks {
class Rdb_logger : public rocksdb::Logger {
public:
explicit Rdb_logger(const rocksdb::InfoLogLevel log_level =
rocksdb::InfoLogLevel::ERROR_LEVEL)
: m_mysql_log_level(log_level) {}

void Logv(const rocksdb::InfoLogLevel log_level, const char *format,
va_list ap) override {
DBUG_ASSERT(format != nullptr);
@@ -33,7 +37,7 @@ public:
m_logger->Logv(log_level, format, ap);
}

if (log_level < GetInfoLogLevel()) {
if (log_level < m_mysql_log_level) {
return;
}
@@ -61,8 +65,21 @@ public:
m_logger = logger;
}

void SetInfoLogLevel(const rocksdb::InfoLogLevel log_level) override {
// The InfoLogLevel for the logger is used by rocksdb to filter
// messages, so it needs to be the lower of the two loggers
rocksdb::InfoLogLevel base_level = log_level;

if (m_logger && m_logger->GetInfoLogLevel() < base_level) {
base_level = m_logger->GetInfoLogLevel();
}
rocksdb::Logger::SetInfoLogLevel(base_level);
m_mysql_log_level = log_level;
}

private:
std::shared_ptr<rocksdb::Logger> m_logger;
rocksdb::InfoLogLevel m_mysql_log_level;
};

} // namespace myrocks
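The SetInfoLogLevel() override above keeps two thresholds in play: RocksDB filters messages against the base rocksdb::Logger level, while the plugin filters against m_mysql_log_level, so the base level must be the lower (more verbose) of the two or messages would be dropped before the MySQL-side check ever runs. The standalone C++ sketch below (not part of the diff, and not the real rocksdb::Logger interface) illustrates that two-threshold filtering idea.

#include <algorithm>
#include <iostream>
#include <string>

enum class LogLevel { DEBUG_LEVEL = 0, INFO_LEVEL = 1, WARN_LEVEL = 2, ERROR_LEVEL = 3 };

// Illustrative two-stage filter: the wrapped sink has its own threshold, so the
// wrapper's effective (coarse) threshold is min(own, wrapped) to avoid dropping
// messages the inner consumer still wants.
class ForwardingLogger {
 public:
  ForwardingLogger(LogLevel own, LogLevel wrapped)
      : m_own_level(own), m_wrapped_level(wrapped),
        m_effective_level(std::min(own, wrapped)) {}

  void Log(LogLevel level, const std::string &msg) const {
    if (level < m_effective_level) return;       // coarse filter (RocksDB side)
    if (level >= m_wrapped_level)
      std::cout << "[wrapped] " << msg << "\n";  // forwarded to the inner logger
    if (level >= m_own_level)
      std::cout << "[mysql]   " << msg << "\n";  // written to the MySQL error log
  }

 private:
  LogLevel m_own_level;
  LogLevel m_wrapped_level;
  LogLevel m_effective_level;
};

int main() {
  ForwardingLogger logger(LogLevel::WARN_LEVEL, LogLevel::INFO_LEVEL);
  logger.Log(LogLevel::INFO_LEVEL, "reaches only the wrapped sink");
  logger.Log(LogLevel::ERROR_LEVEL, "reaches both sinks");
  return 0;
}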
@@ -5,7 +5,7 @@ USE mysqlslap;
CREATE TABLE t1(id BIGINT AUTO_INCREMENT, value BIGINT, PRIMARY KEY(id)) ENGINE=rocksdb;
# 2PC enabled, MyRocks durability enabled
SET GLOBAL rocksdb_enable_2pc=0;
SET GLOBAL rocksdb_write_sync=1;
SET GLOBAL rocksdb_flush_log_at_trx_commit=1;
## 2PC + durability + single thread
select variable_value into @c from information_schema.global_status where variable_name='rocksdb_wal_group_syncs';
select case when variable_value-@c = 1000 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_wal_group_syncs';
@@ -18,7 +18,7 @@ case when variable_value-@c > 0 and variable_value-@c < 10000 then 'true' else '
false
# 2PC enabled, MyRocks durability disabled
SET GLOBAL rocksdb_enable_2pc=0;
SET GLOBAL rocksdb_write_sync=0;
SET GLOBAL rocksdb_flush_log_at_trx_commit=0;
select variable_value into @c from information_schema.global_status where variable_name='rocksdb_wal_group_syncs';
select case when variable_value-@c = 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_wal_group_syncs';
case when variable_value-@c = 0 then 'true' else 'false' end
@@ -29,7 +29,7 @@ case when variable_value-@c = 0 then 'true' else 'false' end
true
# 2PC disabled, MyRocks durability enabled
SET GLOBAL rocksdb_enable_2pc=1;
SET GLOBAL rocksdb_write_sync=1;
SET GLOBAL rocksdb_flush_log_at_trx_commit=1;
select variable_value into @c from information_schema.global_status where variable_name='rocksdb_wal_group_syncs';
select case when variable_value-@c = 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_wal_group_syncs';
case when variable_value-@c = 0 then 'true' else 'false' end
@@ -39,6 +39,6 @@ select case when variable_value-@c = 0 then 'true' else 'false' end from informa
case when variable_value-@c = 0 then 'true' else 'false' end
false
SET GLOBAL rocksdb_enable_2pc=1;
SET GLOBAL rocksdb_write_sync=0;
SET GLOBAL rocksdb_flush_log_at_trx_commit=1;
DROP TABLE t1;
DROP DATABASE mysqlslap;
@@ -0,0 +1,21 @@
DROP TABLE IF EXISTS t1;
CREATE TABLE t1 (i INT PRIMARY KEY, j INT) ENGINE = ROCKSDB;
INSERT INTO t1 VALUES (1,2), (2,4), (3,6), (4,8), (5,10);
SET debug_sync= 'rocksdb.commit_in_place_alter_table WAIT_FOR flushed';
ALTER TABLE t1 ADD INDEX kj(j), ALGORITHM=INPLACE;
SET GLOBAL rocksdb_force_flush_memtable_now = 1;
SET debug_sync= 'now SIGNAL flushed';
SELECT * FROM INFORMATION_SCHEMA.ROCKSDB_INDEX_FILE_MAP
WHERE INDEX_NUMBER =
(SELECT INDEX_NUMBER FROM INFORMATION_SCHEMA.ROCKSDB_DDL
WHERE TABLE_NAME = 't1' AND INDEX_NAME = "PRIMARY");
COLUMN_FAMILY INDEX_NUMBER SST_NAME NUM_ROWS DATA_SIZE ENTRY_DELETES ENTRY_SINGLEDELETES ENTRY_MERGES ENTRY_OTHERS DISTINCT_KEYS_PREFIX
# # SSTNAME 5 # # # # # 5
SELECT * FROM INFORMATION_SCHEMA.ROCKSDB_INDEX_FILE_MAP
WHERE INDEX_NUMBER =
(SELECT INDEX_NUMBER FROM INFORMATION_SCHEMA.ROCKSDB_DDL
WHERE TABLE_NAME = 't1' AND INDEX_NAME = "kj");
COLUMN_FAMILY INDEX_NUMBER SST_NAME NUM_ROWS DATA_SIZE ENTRY_DELETES ENTRY_SINGLEDELETES ENTRY_MERGES ENTRY_OTHERS DISTINCT_KEYS_PREFIX
# # SSTNAME 5 # # # # # 5,5
SET debug_sync='RESET';
DROP TABLE t1;
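The WAIT_FOR/SIGNAL pair above is what lets the test flush the memtable while the inplace ALTER is still parked on the rocksdb.commit_in_place_alter_table sync point, which is the behaviour the commit message describes. The standalone C++ sketch below (not part of the diff) illustrates the same rendezvous semantics with a condition variable; it is only an analogy for the coordination pattern, not MySQL's DEBUG_SYNC implementation.

#include <chrono>
#include <condition_variable>
#include <iostream>
#include <mutex>
#include <thread>

// Illustrative only: session 1 reaches a named sync point and blocks (WAIT_FOR),
// session 2 does its work and then wakes it up (SIGNAL), mirroring
// "... WAIT_FOR flushed" / "now SIGNAL flushed" in the test above.
std::mutex mu;
std::condition_variable cv;
bool signalled = false;

void alter_session() {
  std::cout << "ALTER: reached sync point, waiting for 'flushed'\n";
  std::unique_lock<std::mutex> lock(mu);
  cv.wait(lock, [] { return signalled; });
  std::cout << "ALTER: resumed, committing the in-place alter\n";
}

void flush_session() {
  std::this_thread::sleep_for(std::chrono::milliseconds(50));
  std::cout << "FLUSH: forcing memtable flush, then signalling\n";
  {
    std::lock_guard<std::mutex> lock(mu);
    signalled = true;
  }
  cv.notify_one();
}

int main() {
  std::thread t1(alter_session), t2(flush_session);
  t1.join();
  t2.join();
  return 0;
}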
@@ -0,0 +1,85 @@
include/master-slave.inc
Warnings:
Note #### Sending passwords in plain text without SSL/TLS is extremely insecure.
Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information.
[connection master]
set @save_rocksdb_blind_delete_primary_key=@@session.rocksdb_blind_delete_primary_key;
set @save_rocksdb_master_skip_tx_api=@@session.rocksdb_master_skip_tx_api;
DROP TABLE IF EXISTS t1,t2;
create table t1 (id int primary key, value int, value2 varchar(200)) engine=rocksdb;
create table t2 (id int primary key, value int, value2 varchar(200), index(value)) engine=rocksdb;
SET session rocksdb_blind_delete_primary_key=1;
select variable_value into @c from information_schema.global_status where variable_name='rocksdb_rows_deleted_blind';
select variable_value-@c from information_schema.global_status where variable_name='rocksdb_rows_deleted_blind';
variable_value-@c
1000
SELECT count(*) FROM t1;
count(*)
9000
include/sync_slave_sql_with_master.inc
SELECT count(*) FROM t1;
count(*)
9000
select variable_value into @c from information_schema.global_status where variable_name='rocksdb_rows_deleted_blind';
select variable_value-@c from information_schema.global_status where variable_name='rocksdb_rows_deleted_blind';
variable_value-@c
0
SELECT count(*) FROM t2;
count(*)
9000
SET session rocksdb_master_skip_tx_api=1;
select variable_value into @c from information_schema.global_status where variable_name='rocksdb_rows_deleted_blind';
select variable_value-@c from information_schema.global_status where variable_name='rocksdb_rows_deleted_blind';
variable_value-@c
1000
SELECT count(*) FROM t1;
count(*)
8000
SELECT count(*) FROM t2;
count(*)
8000
include/sync_slave_sql_with_master.inc
SELECT count(*) FROM t1;
count(*)
8000
SELECT count(*) FROM t2;
count(*)
8000
select variable_value into @c from information_schema.global_status where variable_name='rocksdb_rows_deleted_blind';
DELETE FROM t1 WHERE id BETWEEN 3001 AND 4000;
DELETE FROM t2 WHERE id BETWEEN 3001 AND 4000;
select variable_value-@c from information_schema.global_status where variable_name='rocksdb_rows_deleted_blind';
variable_value-@c
0
SELECT count(*) FROM t1;
count(*)
7000
SELECT count(*) FROM t2;
count(*)
7000
include/sync_slave_sql_with_master.inc
SELECT count(*) FROM t1;
count(*)
7000
SELECT count(*) FROM t2;
count(*)
7000
DELETE FROM t1 WHERE id = 10;
SELECT count(*) FROM t1;
count(*)
7000
call mtr.add_suppression("Slave SQL.*Could not execute Delete_rows event on table test.t1.*Error_code.*");
call mtr.add_suppression("Slave: Can't find record in 't1'.*");
include/wait_for_slave_sql_error.inc [errno=1032]
set @save_rocksdb_read_free_rpl_tables=@@global.rocksdb_read_free_rpl_tables;
set global rocksdb_read_free_rpl_tables="t.*";
START SLAVE;
include/sync_slave_sql_with_master.inc
SELECT count(*) FROM t1;
count(*)
7000
SELECT count(*) FROM t2;
count(*)
7000
set global rocksdb_read_free_rpl_tables=@save_rocksdb_read_free_rpl_tables;
SET session rocksdb_blind_delete_primary_key=@save_rocksdb_blind_delete_primary_key;
SET session rocksdb_master_skip_tx_api=@save_rocksdb_master_skip_tx_api;
DROP TABLE t1, t2;
include/rpl_end.inc
@@ -1,4 +1,4 @@
DROP TABLE IF EXISTS t1, t2;
DROP TABLE IF EXISTS t1, t2, t3;
CREATE TABLE t1(pk CHAR(5) PRIMARY KEY, a char(30), b char(30), key(a)) COLLATE 'latin1_bin';
CREATE TABLE t2(pk CHAR(5) PRIMARY KEY, a char(30), b char(30), key(a)) COLLATE 'latin1_bin';
CREATE TABLE t3(pk CHAR(5) PRIMARY KEY, a char(30), b char(30), key(a)) COLLATE 'latin1_bin'
@@ -19,9 +19,9 @@ LOAD DATA INFILE <input_file> INTO TABLE t3;
set rocksdb_bulk_load=0;
SHOW TABLE STATUS WHERE name LIKE 't%';
Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment
t1 ROCKSDB 10 Fixed 10000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL
t2 ROCKSDB 10 Fixed 10000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL
t3 ROCKSDB 10 Fixed 10000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL partitioned
t1 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL
t2 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL
t3 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL partitioned
ANALYZE TABLE t1, t2, t3;
Table Op Msg_type Msg_text
test.t1 analyze status OK
@@ -29,36 +29,36 @@ test.t2 analyze status OK
test.t3 analyze status OK
SHOW TABLE STATUS WHERE name LIKE 't%';
Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment
t1 ROCKSDB 10 Fixed 10000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL
t2 ROCKSDB 10 Fixed 10000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL
t3 ROCKSDB 10 Fixed 10000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL partitioned
t1 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL
t2 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL
t3 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL partitioned
select count(pk) from t1;
count(pk)
10000000
5000000
select count(a) from t1;
count(a)
10000000
5000000
select count(b) from t1;
count(b)
10000000
5000000
select count(pk) from t2;
count(pk)
10000000
5000000
select count(a) from t2;
count(a)
10000000
5000000
select count(b) from t2;
count(b)
10000000
5000000
select count(pk) from t3;
count(pk)
10000000
5000000
select count(a) from t3;
count(a)
10000000
5000000
select count(b) from t3;
count(b)
10000000
5000000
longfilenamethatvalidatesthatthiswillgetdeleted.bulk_load.tmp
test.bulk_load.tmp
DROP TABLE t1, t2, t3;
@@ -125,4 +125,5 @@ CREATE TABLE abc (id INT PRIMARY KEY, value varchar(50), index(value)) engine=ro
CREATE TABLE abcd (id INT PRIMARY KEY, value varchar(50), index(value)) engine=rocksdb charset utf8;
ERROR HY000: Unsupported collation on string indexed column test.abcd.value Use binary collation (binary, latin1_bin, utf8_bin).
DROP TABLE abc;
SET GLOBAL rocksdb_strict_collation_exceptions=null;
SET GLOBAL rocksdb_strict_collation_exceptions=@start_global_value;
@@ -1,4 +1,4 @@
DROP TABLE IF EXISTS t1;
CREATE TABLE t1 (i INT, PRIMARY KEY (i) COMMENT '__system__') ENGINE = ROCKSDB;
ERROR HY000: Incorrect arguments to column family not valid for storing index data
ERROR HY000: Incorrect arguments to column family not valid for storing index data.
DROP TABLE IF EXISTS t1;
@@ -66,7 +66,7 @@ Handler_read_prev 0
Handler_read_rnd 0
Handler_read_rnd_next 10
FLUSH STATUS;
SELECT * FROM t1 WHERE b <=5 ORDER BY b;
SELECT * FROM t1 FORCE INDEX(b) WHERE b <=5 ORDER BY b;
id a b
4 NULL 4
5 NULL 5
@@ -22,7 +22,7 @@ insert into linktable (id1, link_type, id2) values (2, 1, 7);
insert into linktable (id1, link_type, id2) values (2, 1, 8);
insert into linktable (id1, link_type, id2) values (2, 1, 9);
insert into linktable (id1, link_type, id2) values (2, 1, 10);
explain select id1, id2, link_type, data from linktable where id1=2 and link_type=1 and (id2=1 or id2=2 or id2=3 or id2=4 or id2=5);
explain select id1, id2, link_type, data from linktable force index(primary) where id1=2 and link_type=1 and (id2=1 or id2=2 or id2=3 or id2=4 or id2=5);
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE linktable range PRIMARY,id1_type PRIMARY 24 NULL # Using where
1 SIMPLE linktable range PRIMARY PRIMARY 24 NULL # Using where
drop table linktable;
storage/rocksdb/mysql-test/rocksdb/r/issue495.result (new file, 32 lines)
@@ -0,0 +1,32 @@
drop table if exists t;
Warnings:
Note 1051 Unknown table 'test.t'
create table t (
a int,
b int,
c varchar(12249) collate latin1_bin,
d datetime,
e int,
f int,
g blob,
h int,
i int,
key (b,e),
key (h,b)
) engine=rocksdb
partition by linear hash (i) partitions 8 ;
insert into t values (1,1,'a',now(),1,1,'a',1,1);
insert into t values (1,1,'a',now(),1,1,'a',1,1);
insert into t values (1,1,'a',now(),1,1,'a',1,1);
insert into t values (1,1,'a',now(),1,1,'a',1,1);
insert into t values (1,1,'a',now(),1,1,'a',1,1);
insert into t values (1,1,'a',now(),1,1,'a',1,1);
insert into t values (1,1,'a',now(),1,1,'a',1,1);
insert into t values (1,1,'a',now(),1,1,'a',1,1);
select i from t group by h;
i
1
select i from t group by h;
i
1
drop table t;
@@ -124,6 +124,51 @@ UNLOCK TABLES;
/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;

2
==== mysqldump with --innodb-stats-on-metadata ====

/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;
/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;
/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;
/*!40101 SET NAMES utf8 */;
/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
/*!40103 SET TIME_ZONE='+00:00' */;
/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
-- CHANGE MASTER TO MASTER_LOG_FILE='master-bin.000001', MASTER_LOG_POS=3893;
DROP TABLE IF EXISTS `r1`;
/*!40101 SET @saved_cs_client = @@character_set_client */;
/*!40101 SET character_set_client = utf8 */;
CREATE TABLE `r1` (
`id1` int(11) NOT NULL DEFAULT '0',
`id2` int(11) NOT NULL DEFAULT '0',
`id3` varchar(100) NOT NULL DEFAULT '',
`id4` int(11) NOT NULL DEFAULT '0',
`value1` int(11) DEFAULT NULL,
`value2` int(11) DEFAULT NULL,
`value3` int(11) DEFAULT NULL,
`value4` int(11) DEFAULT NULL,
PRIMARY KEY (`id1`,`id2`,`id3`,`id4`)
) ENGINE=ROCKSDB DEFAULT CHARSET=latin1;
/*!40101 SET character_set_client = @saved_cs_client */;
/* ORDERING KEY : (null) */;

LOCK TABLES `r1` WRITE;
/*!40000 ALTER TABLE `r1` DISABLE KEYS */;
INSERT INTO `r1` VALUES (1,1,'1',1,1,1,1,1),(1,1,'1',2,2,2,2,2),(1,1,'2',1,3,3,3,3),(1,1,'2',2,4,4,4,4),(1,2,'1',1,5,5,5,5),(1,2,'1',2,6,6,6,6),(1,2,'2',1,7,7,7,7),(1,2,'2',2,8,8,8,8),(2,1,'1',1,9,9,9,9),(2,1,'1',2,10,10,10,10),(2,1,'2',1,11,11,11,11),(2,1,'2',2,12,12,12,12),(2,2,'1',1,13,13,13,13),(2,2,'1',2,14,14,14,14),(2,2,'2',1,15,15,15,15),(2,2,'2',2,16,16,16,16);
/*!40000 ALTER TABLE `r1` ENABLE KEYS */;
UNLOCK TABLES;
/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;

/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;
/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */;
/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;
/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;

SET GLOBAL binlog_format=statement;
SET GLOBAL binlog_format=row;
drop table r1;
@@ -2,7 +2,55 @@ DROP TABLE IF EXISTS t1;
DROP TABLE IF EXISTS VAR_POP;
DROP TABLE IF EXISTS TEMP0;
DROP TABLE IF EXISTS VAR_SAMP;
DROP TABLE IF EXISTS ti;
DROP TABLE IF EXISTS members;
DROP TABLE IF EXISTS members_2;
DROP TABLE IF EXISTS employees;
DROP TABLE IF EXISTS employees_2;
DROP TABLE IF EXISTS employees_3;
DROP TABLE IF EXISTS quarterly_report_status;
DROP TABLE IF EXISTS employees_4;
DROP TABLE IF EXISTS h2;
DROP TABLE IF EXISTS rcx;
DROP TABLE IF EXISTS r1;
DROP TABLE IF EXISTS rc1;
DROP TABLE IF EXISTS rx;
DROP TABLE IF EXISTS rc2;
DROP TABLE IF EXISTS rc3;
DROP TABLE IF EXISTS rc4;
DROP TABLE IF EXISTS employees_by_lname;
DROP TABLE IF EXISTS customers_1;
DROP TABLE IF EXISTS customers_2;
DROP TABLE IF EXISTS customers_3;
DROP TABLE IF EXISTS employees_hash;
DROP TABLE IF EXISTS employees_hash_1;
DROP TABLE IF EXISTS t1_hash;
DROP TABLE IF EXISTS employees_linear_hash;
DROP TABLE IF EXISTS t1_linear_hash;
DROP TABLE IF EXISTS k1;
DROP TABLE IF EXISTS k2;
DROP TABLE IF EXISTS tm1;
DROP TABLE IF EXISTS tk;
DROP TABLE IF EXISTS ts;
DROP TABLE IF EXISTS ts_1;
DROP TABLE IF EXISTS ts_3;
DROP TABLE IF EXISTS ts_4;
DROP TABLE IF EXISTS ts_5;
DROP TABLE IF EXISTS trb3;
DROP TABLE IF EXISTS tr;
DROP TABLE IF EXISTS members_3;
DROP TABLE IF EXISTS clients;
DROP TABLE IF EXISTS clients_lk;
DROP TABLE IF EXISTS trb1;
CREATE TABLE t1 (i INT, j INT, k INT, PRIMARY KEY (i)) ENGINE = ROCKSDB PARTITION BY KEY(i) PARTITIONS 4;
Table Op Msg_type Msg_text
test.t1 optimize status OK
Table Op Msg_type Msg_text
test.t1 analyze status OK
Table Op Msg_type Msg_text
test.t1 repair status OK
Table Op Msg_type Msg_text
test.t1 check status OK
SHOW TABLES;
Tables_in_test
TEMP0
@@ -24,7 +72,614 @@ i j k
SELECT COUNT(*) FROM t1;
COUNT(*)
1000
DROP TABLE t1;
DROP TABLE VAR_POP;
DROP TABLE TEMP0;
DROP TABLE VAR_SAMP;
CREATE TABLE ti(
id INT,
amount DECIMAL(7,2),
tr_date DATE
) ENGINE=ROCKSDB
PARTITION BY HASH(MONTH(tr_date))
PARTITIONS 6;
CREATE TABLE members (
firstname VARCHAR(25) NOT NULL,
lastname VARCHAR(25) NOT NULL,
username VARCHAR(16) NOT NULL,
email VARCHAR(35),
joined DATE NOT NULL
) ENGINE=ROCKSDB
PARTITION BY KEY(joined)
PARTITIONS 6;
CREATE TABLE members_2 (
firstname VARCHAR(25) NOT NULL,
lastname VARCHAR(25) NOT NULL,
username VARCHAR(16) NOT NULL,
email VARCHAR(35),
joined DATE NOT NULL
) ENGINE=ROCKSDB
PARTITION BY RANGE(YEAR(joined)) (
PARTITION p0 VALUES LESS THAN (1960),
PARTITION p1 VALUES LESS THAN (1970),
PARTITION p2 VALUES LESS THAN (1980),
PARTITION p3 VALUES LESS THAN (1990),
PARTITION p4 VALUES LESS THAN MAXVALUE
);
CREATE TABLE t2 (val INT)
ENGINE=ROCKSDB
PARTITION BY LIST(val)(
PARTITION mypart VALUES IN (1,3,5),
PARTITION MyPart VALUES IN (2,4,6)
);
ERROR HY000: Duplicate partition name MyPart
CREATE TABLE employees (
id INT NOT NULL,
fname VARCHAR(30),
lname VARCHAR(30),
hired DATE NOT NULL DEFAULT '1970-01-01',
separated DATE NOT NULL DEFAULT '9999-12-31',
job_code INT NOT NULL,
store_id INT NOT NULL
) ENGINE=ROCKSDB
PARTITION BY RANGE (store_id) (
PARTITION p0 VALUES LESS THAN (6),
PARTITION p1 VALUES LESS THAN (11),
PARTITION p2 VALUES LESS THAN (16),
PARTITION p3 VALUES LESS THAN MAXVALUE
);
CREATE TABLE employees_2 (
id INT NOT NULL,
fname VARCHAR(30),
lname VARCHAR(30),
hired DATE NOT NULL DEFAULT '1970-01-01',
separated DATE NOT NULL DEFAULT '9999-12-31',
job_code INT NOT NULL,
store_id INT NOT NULL
) ENGINE=ROCKSDB
PARTITION BY RANGE (job_code) (
PARTITION p0 VALUES LESS THAN (100),
PARTITION p1 VALUES LESS THAN (1000),
PARTITION p2 VALUES LESS THAN (10000)
);
CREATE TABLE employees_3 (
id INT NOT NULL,
fname VARCHAR(30),
lname VARCHAR(30),
hired DATE NOT NULL DEFAULT '1970-01-01',
separated DATE NOT NULL DEFAULT '9999-12-31',
job_code INT,
store_id INT
) ENGINE=ROCKSDB
PARTITION BY RANGE (YEAR(separated)) (
PARTITION p0 VALUES LESS THAN (1991),
PARTITION p1 VALUES LESS THAN (1996),
PARTITION p2 VALUES LESS THAN (2001),
PARTITION p3 VALUES LESS THAN MAXVALUE
);
CREATE TABLE quarterly_report_status (
report_id INT NOT NULL,
report_status VARCHAR(20) NOT NULL,
report_updated TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP
) ENGINE=ROCKSDB
PARTITION BY RANGE (UNIX_TIMESTAMP(report_updated)) (
PARTITION p0 VALUES LESS THAN ( UNIX_TIMESTAMP('2008-01-01 00:00:00') ),
PARTITION p1 VALUES LESS THAN ( UNIX_TIMESTAMP('2008-04-01 00:00:00') ),
PARTITION p2 VALUES LESS THAN ( UNIX_TIMESTAMP('2008-07-01 00:00:00') ),
PARTITION p3 VALUES LESS THAN ( UNIX_TIMESTAMP('2008-10-01 00:00:00') ),
PARTITION p4 VALUES LESS THAN ( UNIX_TIMESTAMP('2009-01-01 00:00:00') ),
PARTITION p5 VALUES LESS THAN ( UNIX_TIMESTAMP('2009-04-01 00:00:00') ),
PARTITION p6 VALUES LESS THAN ( UNIX_TIMESTAMP('2009-07-01 00:00:00') ),
PARTITION p7 VALUES LESS THAN ( UNIX_TIMESTAMP('2009-10-01 00:00:00') ),
PARTITION p8 VALUES LESS THAN ( UNIX_TIMESTAMP('2010-01-01 00:00:00') ),
PARTITION p9 VALUES LESS THAN (MAXVALUE)
);
CREATE TABLE employees_4 (
id INT NOT NULL,
fname VARCHAR(30),
lname VARCHAR(30),
hired DATE NOT NULL DEFAULT '1970-01-01',
separated DATE NOT NULL DEFAULT '9999-12-31',
job_code INT,
store_id INT
) ENGINE=ROCKSDB
PARTITION BY LIST(store_id) (
PARTITION pNorth VALUES IN (3,5,6,9,17),
PARTITION pEast VALUES IN (1,2,10,11,19,20),
PARTITION pWest VALUES IN (4,12,13,14,18),
PARTITION pCentral VALUES IN (7,8,15,16)
);
CREATE TABLE h2 (
c1 INT,
c2 INT
) ENGINE=ROCKSDB
PARTITION BY LIST(c1) (
PARTITION p0 VALUES IN (1, 4, 7),
PARTITION p1 VALUES IN (2, 5, 8)
);
INSERT INTO h2 VALUES (3, 5);
ERROR HY000: Table has no partition for value 3
CREATE TABLE rcx (
a INT,
b INT,
c CHAR(3),
d INT
) ENGINE=ROCKSDB
PARTITION BY RANGE COLUMNS(a,d,c) (
PARTITION p0 VALUES LESS THAN (5,10,'ggg'),
PARTITION p1 VALUES LESS THAN (10,20,'mmm'),
PARTITION p2 VALUES LESS THAN (15,30,'sss'),
PARTITION p3 VALUES LESS THAN (MAXVALUE,MAXVALUE,MAXVALUE)
);
CREATE TABLE r1 (
a INT,
b INT
) ENGINE=ROCKSDB
PARTITION BY RANGE (a) (
PARTITION p0 VALUES LESS THAN (5),
PARTITION p1 VALUES LESS THAN (MAXVALUE)
);
INSERT INTO r1 VALUES (5,10), (5,11), (5,12);
CREATE TABLE rc1 (
a INT,
b INT
) ENGINE=ROCKSDB
PARTITION BY RANGE COLUMNS(a, b) (
PARTITION p0 VALUES LESS THAN (5, 12),
PARTITION p3 VALUES LESS THAN (MAXVALUE, MAXVALUE)
);
INSERT INTO rc1 VALUES (5,10), (5,11), (5,12);
SELECT (5,10) < (5,12), (5,11) < (5,12), (5,12) < (5,12);
(5,10) < (5,12) (5,11) < (5,12) (5,12) < (5,12)
1 1 0
CREATE TABLE rx (
a INT,
b INT
) ENGINE=ROCKSDB
PARTITION BY RANGE COLUMNS (a) (
PARTITION p0 VALUES LESS THAN (5),
PARTITION p1 VALUES LESS THAN (MAXVALUE)
);
INSERT INTO rx VALUES (5,10), (5,11), (5,12);
CREATE TABLE rc2 (
a INT,
b INT
) ENGINE=ROCKSDB
PARTITION BY RANGE COLUMNS(a,b) (
PARTITION p0 VALUES LESS THAN (0,10),
PARTITION p1 VALUES LESS THAN (10,20),
PARTITION p2 VALUES LESS THAN (10,30),
PARTITION p3 VALUES LESS THAN (MAXVALUE,MAXVALUE)
);
CREATE TABLE rc3 (
a INT,
b INT
) ENGINE=ROCKSDB
PARTITION BY RANGE COLUMNS(a,b) (
PARTITION p0 VALUES LESS THAN (0,10),
PARTITION p1 VALUES LESS THAN (10,20),
PARTITION p2 VALUES LESS THAN (10,30),
PARTITION p3 VALUES LESS THAN (10,35),
PARTITION p4 VALUES LESS THAN (20,40),
PARTITION p5 VALUES LESS THAN (MAXVALUE,MAXVALUE)
);
CREATE TABLE rc4 (
a INT,
b INT,
c INT
) ENGINE=ROCKSDB
PARTITION BY RANGE COLUMNS(a,b,c) (
PARTITION p0 VALUES LESS THAN (0,25,50),
PARTITION p1 VALUES LESS THAN (10,20,100),
PARTITION p2 VALUES LESS THAN (10,30,50),
PARTITION p3 VALUES LESS THAN (MAXVALUE,MAXVALUE,MAXVALUE)
);
SELECT (0,25,50) < (10,20,100), (10,20,100) < (10,30,50);
(0,25,50) < (10,20,100) (10,20,100) < (10,30,50)
1 1
CREATE TABLE rcf (
a INT,
b INT,
c INT
) ENGINE=ROCKSDB
PARTITION BY RANGE COLUMNS(a,b,c) (
PARTITION p0 VALUES LESS THAN (0,25,50),
PARTITION p1 VALUES LESS THAN (20,20,100),
PARTITION p2 VALUES LESS THAN (10,30,50),
PARTITION p3 VALUES LESS THAN (MAXVALUE,MAXVALUE,MAXVALUE)
);
ERROR HY000: VALUES LESS THAN value must be strictly increasing for each partition
CREATE TABLE employees_by_lname (
id INT NOT NULL,
fname VARCHAR(30),
lname VARCHAR(30),
hired DATE NOT NULL DEFAULT '1970-01-01',
separated DATE NOT NULL DEFAULT '9999-12-31',
job_code INT NOT NULL,
store_id INT NOT NULL
) ENGINE=ROCKSDB
PARTITION BY RANGE COLUMNS (lname) (
PARTITION p0 VALUES LESS THAN ('g'),
PARTITION p1 VALUES LESS THAN ('m'),
PARTITION p2 VALUES LESS THAN ('t'),
PARTITION p3 VALUES LESS THAN (MAXVALUE)
);
ALTER TABLE employees_by_lname PARTITION BY RANGE COLUMNS (lname) (
PARTITION p0 VALUES LESS THAN ('g'),
PARTITION p1 VALUES LESS THAN ('m'),
PARTITION p2 VALUES LESS THAN ('t'),
PARTITION p3 VALUES LESS THAN (MAXVALUE)
);
ALTER TABLE employees_by_lname PARTITION BY RANGE COLUMNS (hired) (
PARTITION p0 VALUES LESS THAN ('1970-01-01'),
PARTITION p1 VALUES LESS THAN ('1980-01-01'),
PARTITION p2 VALUES LESS THAN ('1990-01-01'),
PARTITION p3 VALUES LESS THAN ('2000-01-01'),
PARTITION p4 VALUES LESS THAN ('2010-01-01'),
PARTITION p5 VALUES LESS THAN (MAXVALUE)
);
CREATE TABLE customers_1 (
first_name VARCHAR(25),
last_name VARCHAR(25),
street_1 VARCHAR(30),
street_2 VARCHAR(30),
city VARCHAR(15),
renewal DATE
) ENGINE=ROCKSDB
PARTITION BY LIST COLUMNS(city) (
PARTITION pRegion_1 VALUES IN('Oskarshamn', 'Högsby', 'Mönsterås'),
PARTITION pRegion_2 VALUES IN('Vimmerby', 'Hultsfred', 'Västervik'),
PARTITION pRegion_3 VALUES IN('Nässjö', 'Eksjö', 'Vetlanda'),
PARTITION pRegion_4 VALUES IN('Uppvidinge', 'Alvesta', 'Växjo')
);
CREATE TABLE customers_2 (
first_name VARCHAR(25),
last_name VARCHAR(25),
street_1 VARCHAR(30),
street_2 VARCHAR(30),
city VARCHAR(15),
renewal DATE
) ENGINE=ROCKSDB
PARTITION BY LIST COLUMNS(renewal) (
PARTITION pWeek_1 VALUES IN('2010-02-01', '2010-02-02', '2010-02-03',
'2010-02-04', '2010-02-05', '2010-02-06', '2010-02-07'),
PARTITION pWeek_2 VALUES IN('2010-02-08', '2010-02-09', '2010-02-10',
'2010-02-11', '2010-02-12', '2010-02-13', '2010-02-14'),
PARTITION pWeek_3 VALUES IN('2010-02-15', '2010-02-16', '2010-02-17',
'2010-02-18', '2010-02-19', '2010-02-20', '2010-02-21'),
PARTITION pWeek_4 VALUES IN('2010-02-22', '2010-02-23', '2010-02-24',
'2010-02-25', '2010-02-26', '2010-02-27', '2010-02-28')
);
CREATE TABLE customers_3 (
first_name VARCHAR(25),
last_name VARCHAR(25),
street_1 VARCHAR(30),
street_2 VARCHAR(30),
city VARCHAR(15),
renewal DATE
) ENGINE=ROCKSDB
PARTITION BY RANGE COLUMNS(renewal) (
PARTITION pWeek_1 VALUES LESS THAN('2010-02-09'),
PARTITION pWeek_2 VALUES LESS THAN('2010-02-15'),
PARTITION pWeek_3 VALUES LESS THAN('2010-02-22'),
PARTITION pWeek_4 VALUES LESS THAN('2010-03-01')
);
CREATE TABLE employees_hash (
id INT NOT NULL,
fname VARCHAR(30),
lname VARCHAR(30),
hired DATE NOT NULL DEFAULT '1970-01-01',
separated DATE NOT NULL DEFAULT '9999-12-31',
job_code INT,
store_id INT
) ENGINE=ROCKSDB
PARTITION BY HASH(store_id)
PARTITIONS 4;
CREATE TABLE employees_hash_1 (
id INT NOT NULL,
fname VARCHAR(30),
lname VARCHAR(30),
hired DATE NOT NULL DEFAULT '1970-01-01',
separated DATE NOT NULL DEFAULT '9999-12-31',
job_code INT,
store_id INT
) ENGINE=ROCKSDB
PARTITION BY HASH( YEAR(hired) )
PARTITIONS 4;
CREATE TABLE t1_hash (
col1 INT,
col2 CHAR(5),
col3 DATE
) ENGINE=ROCKSDB
PARTITION BY HASH( YEAR(col3) )
PARTITIONS 4;
CREATE TABLE employees_linear_hash (
id INT NOT NULL,
fname VARCHAR(30),
lname VARCHAR(30),
hired DATE NOT NULL DEFAULT '1970-01-01',
separated DATE NOT NULL DEFAULT '9999-12-31',
job_code INT,
store_id INT
) ENGINE=ROCKSDB
PARTITION BY LINEAR HASH( YEAR(hired) )
PARTITIONS 4;
CREATE TABLE t1_linear_hash (
col1 INT,
col2 CHAR(5),
col3 DATE
) ENGINE=ROCKSDB
PARTITION BY LINEAR HASH( YEAR(col3) )
PARTITIONS 6;
CREATE TABLE k1 (
id INT NOT NULL PRIMARY KEY,
name VARCHAR(20)
) ENGINE=ROCKSDB
PARTITION BY KEY()
PARTITIONS 2;
CREATE TABLE k2 (
id INT NOT NULL,
name VARCHAR(20),
UNIQUE KEY (id)
) ENGINE=ROCKSDB
PARTITION BY KEY()
PARTITIONS 2;
CREATE TABLE tm1 (
s1 CHAR(32) PRIMARY KEY
) ENGINE=ROCKSDB
PARTITION BY KEY(s1)
PARTITIONS 10;
CREATE TABLE tk (
col1 INT NOT NULL,
col2 CHAR(5),
col3 DATE
) ENGINE=ROCKSDB
PARTITION BY LINEAR KEY (col1)
PARTITIONS 3;
CREATE TABLE ts (
id INT,
purchased DATE
) ENGINE=ROCKSDB
PARTITION BY RANGE( YEAR(purchased) )
SUBPARTITION BY HASH( TO_DAYS(purchased) )
SUBPARTITIONS 2 (
PARTITION p0 VALUES LESS THAN (1990),
PARTITION p1 VALUES LESS THAN (2000),
PARTITION p2 VALUES LESS THAN MAXVALUE
);
CREATE TABLE ts_1 (
id INT,
purchased DATE
) ENGINE=ROCKSDB
PARTITION BY RANGE( YEAR(purchased) )
SUBPARTITION BY HASH( TO_DAYS(purchased) ) (
PARTITION p0 VALUES LESS THAN (1990) (
SUBPARTITION s0,
SUBPARTITION s1
),
PARTITION p1 VALUES LESS THAN (2000) (
SUBPARTITION s2,
SUBPARTITION s3
),
PARTITION p2 VALUES LESS THAN MAXVALUE (
SUBPARTITION s4,
SUBPARTITION s5
)
);
CREATE TABLE ts_2 (
id INT,
purchased DATE
) ENGINE=ROCKSDB
PARTITION BY RANGE( YEAR(purchased) )
SUBPARTITION BY HASH( TO_DAYS(purchased) ) (
PARTITION p0 VALUES LESS THAN (1990) (
SUBPARTITION s0,
SUBPARTITION s1
),
PARTITION p1 VALUES LESS THAN (2000),
PARTITION p2 VALUES LESS THAN MAXVALUE (
SUBPARTITION s2,
SUBPARTITION s3
)
);
ERROR 42000: Wrong number of subpartitions defined, mismatch with previous setting near '
PARTITION p2 VALUES LESS THAN MAXVALUE (
SUBPARTITION s2,
SUBPARTITION s3
)
)' at line 11
CREATE TABLE ts_3 (
id INT,
purchased DATE
) ENGINE=ROCKSDB
PARTITION BY RANGE( YEAR(purchased) )
SUBPARTITION BY HASH( TO_DAYS(purchased) ) (
PARTITION p0 VALUES LESS THAN (1990) (
SUBPARTITION s0,
SUBPARTITION s1
),
PARTITION p1 VALUES LESS THAN (2000) (
SUBPARTITION s2,
SUBPARTITION s3
),
PARTITION p2 VALUES LESS THAN MAXVALUE (
SUBPARTITION s4,
SUBPARTITION s5
)
);
CREATE TABLE ts_4 (
id INT,
purchased DATE
) ENGINE=ROCKSDB
PARTITION BY RANGE( YEAR(purchased) )
SUBPARTITION BY HASH( TO_DAYS(purchased) ) (
PARTITION p0 VALUES LESS THAN (1990) (
SUBPARTITION s0,
SUBPARTITION s1
),
PARTITION p1 VALUES LESS THAN (2000) (
SUBPARTITION s2,
SUBPARTITION s3
),
PARTITION p2 VALUES LESS THAN MAXVALUE (
SUBPARTITION s4,
SUBPARTITION s5
)
);
CREATE TABLE ts_5 (
id INT,
purchased DATE
) ENGINE=ROCKSDB
PARTITION BY RANGE(YEAR(purchased))
SUBPARTITION BY HASH( TO_DAYS(purchased) ) (
PARTITION p0 VALUES LESS THAN (1990) (
SUBPARTITION s0a,
SUBPARTITION s0b
),
PARTITION p1 VALUES LESS THAN (2000) (
SUBPARTITION s1a,
SUBPARTITION s1b
),
PARTITION p2 VALUES LESS THAN MAXVALUE (
SUBPARTITION s2a,
SUBPARTITION s2b
)
);
CREATE TABLE trb3 (
id INT,
name VARCHAR(50),
purchased DATE
) ENGINE=ROCKSDB
PARTITION BY RANGE( YEAR(purchased) ) (
PARTITION p0 VALUES LESS THAN (1990),
PARTITION p1 VALUES LESS THAN (1995),
PARTITION p2 VALUES LESS THAN (2000),
PARTITION p3 VALUES LESS THAN (2005)
);
ALTER TABLE trb3 PARTITION BY KEY(id) PARTITIONS 2;
CREATE TABLE tr (
id INT,
name VARCHAR(50),
purchased DATE
) ENGINE=ROCKSDB
PARTITION BY RANGE( YEAR(purchased) ) (
PARTITION p0 VALUES LESS THAN (1990),
PARTITION p1 VALUES LESS THAN (1995),
PARTITION p2 VALUES LESS THAN (2000),
PARTITION p3 VALUES LESS THAN (2005)
);
INSERT INTO tr VALUES
(1, 'desk organiser', '2003-10-15'),
(2, 'CD player', '1993-11-05'),
(3, 'TV set', '1996-03-10'),
(4, 'bookcase', '1982-01-10'),
(5, 'exercise bike', '2004-05-09'),
(6, 'sofa', '1987-06-05'),
(7, 'popcorn maker', '2001-11-22'),
(8, 'aquarium', '1992-08-04'),
(9, 'study desk', '1984-09-16'),
(10, 'lava lamp', '1998-12-25');
SELECT * FROM tr WHERE purchased BETWEEN '1995-01-01' AND '1999-12-31';
id name purchased
3 TV set 1996-03-10
10 lava lamp 1998-12-25
ALTER TABLE tr DROP PARTITION p2;
SELECT * FROM tr WHERE purchased BETWEEN '1995-01-01' AND '1999-12-31';
id name purchased
CREATE TABLE members_3 (
id INT,
fname VARCHAR(25),
lname VARCHAR(25),
dob DATE
) ENGINE=ROCKSDB
PARTITION BY RANGE( YEAR(dob) ) (
PARTITION p0 VALUES LESS THAN (1970),
PARTITION p1 VALUES LESS THAN (1980),
PARTITION p2 VALUES LESS THAN (1990)
);
ALTER TABLE members_3 ADD PARTITION (PARTITION p3 VALUES LESS THAN (2000));
ALTER TABLE members_3 ADD PARTITION (PARTITION n VALUES LESS THAN (1960));
ERROR HY000: VALUES LESS THAN value must be strictly increasing for each partition
CREATE TABLE clients (
id INT,
fname VARCHAR(30),
lname VARCHAR(30),
signed DATE
) ENGINE=ROCKSDB
PARTITION BY HASH( MONTH(signed) )
PARTITIONS 12;
ALTER TABLE clients COALESCE PARTITION 4;
CREATE TABLE clients_lk (
id INT,
fname VARCHAR(30),
lname VARCHAR(30),
signed DATE
) ENGINE=ROCKSDB
PARTITION BY LINEAR KEY(signed)
PARTITIONS 12;
ALTER TABLE clients COALESCE PARTITION 18;
ERROR HY000: Cannot remove all partitions, use DROP TABLE instead
ALTER TABLE clients ADD PARTITION PARTITIONS 6;
CREATE TABLE trb1 (
id INT,
name VARCHAR(50),
purchased DATE
) ENGINE=ROCKSDB
PARTITION BY RANGE(id) (
PARTITION p0 VALUES LESS THAN (3),
PARTITION p1 VALUES LESS THAN (7),
PARTITION p2 VALUES LESS THAN (9),
PARTITION p3 VALUES LESS THAN (11)
);
INSERT INTO trb1 VALUES
(1, 'desk organiser', '2003-10-15'),
(2, 'CD player', '1993-11-05'),
(3, 'TV set', '1996-03-10'),
(4, 'bookcase', '1982-01-10'),
(5, 'exercise bike', '2004-05-09'),
(6, 'sofa', '1987-06-05'),
(7, 'popcorn maker', '2001-11-22'),
(8, 'aquarium', '1992-08-04'),
(9, 'study desk', '1984-09-16'),
(10, 'lava lamp', '1998-12-25');
ALTER TABLE trb1 ADD PRIMARY KEY (id);
DROP TABLE IF EXISTS t1;
DROP TABLE IF EXISTS VAR_POP;
DROP TABLE IF EXISTS TEMP0;
DROP TABLE IF EXISTS VAR_SAMP;
DROP TABLE IF EXISTS ti;
DROP TABLE IF EXISTS members;
DROP TABLE IF EXISTS members_2;
DROP TABLE IF EXISTS employees;
DROP TABLE IF EXISTS employees_2;
DROP TABLE IF EXISTS employees_3;
DROP TABLE IF EXISTS quarterly_report_status;
DROP TABLE IF EXISTS employees_4;
DROP TABLE IF EXISTS h2;
DROP TABLE IF EXISTS rcx;
DROP TABLE IF EXISTS r1;
DROP TABLE IF EXISTS rc1;
DROP TABLE IF EXISTS rx;
DROP TABLE IF EXISTS rc2;
DROP TABLE IF EXISTS rc3;
DROP TABLE IF EXISTS rc4;
DROP TABLE IF EXISTS employees_by_lname;
DROP TABLE IF EXISTS customers_1;
DROP TABLE IF EXISTS customers_2;
DROP TABLE IF EXISTS customers_3;
DROP TABLE IF EXISTS employees_hash;
DROP TABLE IF EXISTS employees_hash_1;
DROP TABLE IF EXISTS t1_hash;
DROP TABLE IF EXISTS employees_linear_hash;
DROP TABLE IF EXISTS t1_linear_hash;
DROP TABLE IF EXISTS k1;
DROP TABLE IF EXISTS k2;
DROP TABLE IF EXISTS tm1;
DROP TABLE IF EXISTS tk;
DROP TABLE IF EXISTS ts;
DROP TABLE IF EXISTS ts_1;
DROP TABLE IF EXISTS ts_3;
DROP TABLE IF EXISTS ts_4;
DROP TABLE IF EXISTS ts_5;
DROP TABLE IF EXISTS trb3;
DROP TABLE IF EXISTS tr;
DROP TABLE IF EXISTS members_3;
DROP TABLE IF EXISTS clients;
DROP TABLE IF EXISTS clients_lk;
DROP TABLE IF EXISTS trb1;
@@ -864,6 +864,7 @@ rocksdb_allow_mmap_reads OFF
rocksdb_allow_mmap_writes OFF
rocksdb_background_sync OFF
rocksdb_base_background_compactions 1
rocksdb_blind_delete_primary_key OFF
rocksdb_block_cache_size 536870912
rocksdb_block_restart_interval 16
rocksdb_block_size 4096
@@ -889,14 +890,16 @@ rocksdb_db_write_buffer_size 0
rocksdb_deadlock_detect OFF
rocksdb_debug_optimizer_no_zero_cardinality ON
rocksdb_default_cf_options
rocksdb_delayed_write_rate 16777216
rocksdb_delete_obsolete_files_period_micros 21600000000
rocksdb_disabledatasync OFF
rocksdb_enable_2pc ON
rocksdb_enable_bulk_load_api ON
rocksdb_enable_thread_tracking OFF
rocksdb_enable_write_thread_adaptive_yield OFF
rocksdb_error_if_exists OFF
rocksdb_flush_log_at_trx_commit 1
rocksdb_flush_memtable_on_analyze ON
rocksdb_force_compute_memtable_stats ON
rocksdb_force_flush_memtable_now OFF
rocksdb_force_index_records_in_range 0
rocksdb_hash_index_allow_collision ON
@@ -908,6 +911,7 @@ rocksdb_lock_scanned_rows OFF
rocksdb_lock_wait_timeout 1
rocksdb_log_file_time_to_roll 0
rocksdb_manifest_preallocation_size 4194304
rocksdb_master_skip_tx_api OFF
rocksdb_max_background_compactions 1
rocksdb_max_background_flushes 1
rocksdb_max_log_file_size 0
@@ -925,7 +929,7 @@ rocksdb_paranoid_checks ON
rocksdb_pause_background_work ON
rocksdb_perf_context_level 0
rocksdb_persistent_cache_path
rocksdb_persistent_cache_size 0
rocksdb_persistent_cache_size_mb 0
rocksdb_pin_l0_filter_and_index_blocks_in_cache ON
rocksdb_print_snapshot_conflict_queries OFF
rocksdb_rate_limiter_bytes_per_sec 0
@@ -953,25 +957,37 @@ rocksdb_validate_tables 1
rocksdb_verify_row_debug_checksums OFF
rocksdb_wal_bytes_per_sync 0
rocksdb_wal_dir
rocksdb_wal_recovery_mode 2
rocksdb_wal_recovery_mode 1
rocksdb_wal_size_limit_mb 0
rocksdb_wal_ttl_seconds 0
rocksdb_whole_key_filtering ON
rocksdb_write_disable_wal OFF
rocksdb_write_ignore_missing_column_families OFF
rocksdb_write_sync OFF
create table t47 (pk int primary key, col1 varchar(12)) engine=rocksdb;
insert into t47 values (1, 'row1');
insert into t47 values (2, 'row2');
set rocksdb_bulk_load=1;
insert into t47 values (3, 'row3'),(4, 'row4');
set rocksdb_bulk_load=0;
connect con1,localhost,root,,;
set rocksdb_bulk_load=1;
insert into t47 values (10, 'row10'),(11, 'row11');
connection default;
set rocksdb_bulk_load=1;
insert into t47 values (100, 'row100'),(101, 'row101');
disconnect con1;
connection default;
set rocksdb_bulk_load=0;
select * from t47;
pk col1
1 row1
2 row2
3 row3
4 row4
10 row10
11 row11
100 row100
101 row101
drop table t47;
#
# Fix TRUNCATE over empty table (transaction is committed when it wasn't
@@ -1410,6 +1426,7 @@ rocksdb_rows_deleted #
rocksdb_rows_inserted #
rocksdb_rows_read #
rocksdb_rows_updated #
rocksdb_rows_deleted_blind #
rocksdb_system_rows_deleted #
rocksdb_system_rows_inserted #
rocksdb_system_rows_read #
@@ -1482,6 +1499,7 @@ ROCKSDB_ROWS_DELETED
ROCKSDB_ROWS_INSERTED
ROCKSDB_ROWS_READ
ROCKSDB_ROWS_UPDATED
ROCKSDB_ROWS_DELETED_BLIND
ROCKSDB_SYSTEM_ROWS_DELETED
ROCKSDB_SYSTEM_ROWS_INSERTED
ROCKSDB_SYSTEM_ROWS_READ
@@ -1556,6 +1574,7 @@ ROCKSDB_ROWS_DELETED
ROCKSDB_ROWS_INSERTED
ROCKSDB_ROWS_READ
ROCKSDB_ROWS_UPDATED
ROCKSDB_ROWS_DELETED_BLIND
ROCKSDB_SYSTEM_ROWS_DELETED
ROCKSDB_SYSTEM_ROWS_INSERTED
ROCKSDB_SYSTEM_ROWS_READ
@@ -1737,7 +1756,7 @@ key1 int,
PRIMARY KEY (id),
index (key1) comment 'test.t1.key1'
) engine=rocksdb;
ERROR HY000: Column Family Flag is different from existing flag. Assign a new CF flag, or do not change existing CF flag.
ERROR HY000: Column family ('test.t1.key1') flag (0) is different from an existing flag (2). Assign a new CF flag, or do not change existing CF flag.
create table t1_err (
id int not null,
key1 int,
@@ -1763,7 +1782,7 @@ key1 int,
PRIMARY KEY (id),
index (key1) comment '$per_idnex_cf'
)engine=rocksdb;
ERROR 42000: This version of MySQL doesn't yet support 'column family name looks like a typo of $per_index_cf'
ERROR 42000: This version of MySQL doesn't yet support 'column family name looks like a typo of $per_index_cf.'
#
# Issue #22: SELECT ... FOR UPDATE takes a long time
#
@@ -0,0 +1,409 @@
|
||||
DROP TABLE IF EXISTS t1;
|
||||
DROP TABLE IF EXISTS t2;
|
||||
CREATE TABLE t1 (
|
||||
c1 INT,
|
||||
c2 INT,
|
||||
name VARCHAR(25) NOT NULL,
|
||||
event DATE,
|
||||
PRIMARY KEY (`c1`, `c2`) COMMENT 'testcomment'
|
||||
) ENGINE=ROCKSDB
|
||||
PARTITION BY LIST(c1) (
|
||||
PARTITION custom_p0 VALUES IN (1, 4, 7),
|
||||
PARTITION custom_p1 VALUES IN (2, 5, 8),
|
||||
PARTITION custom_p2 VALUES IN (3, 6, 9)
|
||||
);
|
||||
SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='testcomment';
|
||||
cf_name
|
||||
DROP TABLE t1;
|
||||
CREATE TABLE t1 (
|
||||
c1 INT,
|
||||
c2 INT,
|
||||
name VARCHAR(25) NOT NULL,
|
||||
event DATE,
|
||||
PRIMARY KEY (`c1`, `c2`) COMMENT 'rev:testrevcomment'
|
||||
) ENGINE=ROCKSDB
|
||||
PARTITION BY LIST(c1) (
|
||||
PARTITION custom_p0 VALUES IN (1, 4, 7),
|
||||
PARTITION custom_p1 VALUES IN (2, 5, 8),
|
||||
PARTITION custom_p2 VALUES IN (3, 6, 9)
|
||||
);
|
||||
SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='rev:testrevcomment';
|
||||
cf_name
|
||||
DROP TABLE t1;
|
||||
CREATE TABLE t1 (
|
||||
c1 INT,
|
||||
c2 INT,
|
||||
name VARCHAR(25) NOT NULL,
|
||||
event DATE,
|
||||
PRIMARY KEY (`c1`, `c2`) COMMENT 'custom_p0_cfname=foo;custom_p1_cfname=my_custom_cf;custom_p2_cfname=baz'
|
||||
) ENGINE=ROCKSDB
|
||||
PARTITION BY LIST(c1) (
|
||||
PARTITION custom_p0 VALUES IN (1, 4, 7),
|
||||
PARTITION custom_p1 VALUES IN (2, 5, 8),
|
||||
PARTITION custom_p2 VALUES IN (3, 6, 9)
|
||||
);
|
||||
set @@global.rocksdb_compact_cf = 'foo';
|
||||
set @@global.rocksdb_compact_cf = 'my_custom_cf';
|
||||
set @@global.rocksdb_compact_cf = 'baz';
|
||||
SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='foo';
|
||||
cf_name
|
||||
foo
|
||||
SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='my_custom_cf';
|
||||
cf_name
|
||||
my_custom_cf
|
||||
SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='baz';
|
||||
cf_name
|
||||
baz
|
||||
DROP TABLE t1;
|
||||
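# The PRIMARY KEY comment above maps each partition to its own column family with
# 'custom_<partition name>_cfname=<cf name>' entries separated by semicolons; a
# 'rev:' prefix (as in 'rev:testrevcomment' and 'rev:bar' elsewhere in this file)
# selects a reverse-ordered column family. A hypothetical minimal sketch:
# CREATE TABLE cf_demo (c1 INT, c2 INT,
#   PRIMARY KEY (c1, c2) COMMENT 'custom_p0_cfname=cf_a;custom_p1_cfname=rev:cf_b'
# ) ENGINE=ROCKSDB
# PARTITION BY LIST(c1) (
#   PARTITION custom_p0 VALUES IN (1),
#   PARTITION custom_p1 VALUES IN (2)
# );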
CREATE TABLE t1 (
|
||||
c1 INT,
|
||||
c2 INT,
|
||||
name VARCHAR(25) NOT NULL,
|
||||
event DATE,
|
||||
PRIMARY KEY (`c1`, `c2`) COMMENT 'custom_p0_cfname=t1-p0;custom_p1_cfname=rev:bar;custom_p2_cfname=t1-p2'
|
||||
) ENGINE=ROCKSDB
|
||||
PARTITION BY LIST(c1) (
|
||||
PARTITION custom_p0 VALUES IN (1, 4, 7),
|
||||
PARTITION custom_p1 VALUES IN (2, 5, 8),
|
||||
PARTITION custom_p2 VALUES IN (3, 6, 9)
|
||||
);
|
||||
set @@global.rocksdb_compact_cf = 't1-p0';
|
||||
set @@global.rocksdb_compact_cf = 'rev:bar';
|
||||
set @@global.rocksdb_compact_cf = 't1-p2';
|
||||
SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='t1-p0';
|
||||
cf_name
|
||||
t1-p0
|
||||
SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='rev:bar';
|
||||
cf_name
|
||||
rev:bar
|
||||
SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='t1-p2';
|
||||
cf_name
|
||||
t1-p2
|
||||
DROP TABLE t1;
|
||||
CREATE TABLE t1 (
|
||||
c1 INT,
|
||||
c2 INT,
|
||||
name VARCHAR(25) NOT NULL,
|
||||
event DATE,
|
||||
PRIMARY KEY (`c1`, `c2`) COMMENT 'custom_p0_cfname=cf-zero;custom_p1_cfname=cf-one;custom_p2_cfname=cf-zero'
|
||||
) ENGINE=ROCKSDB
|
||||
PARTITION BY LIST(c1) (
|
||||
PARTITION custom_p0 VALUES IN (1, 4, 7),
|
||||
PARTITION custom_p1 VALUES IN (2, 5, 8),
|
||||
PARTITION custom_p2 VALUES IN (3, 6, 9),
|
||||
PARTITION custom_p3 VALUES IN (10, 20, 30)
|
||||
);
|
||||
set @@global.rocksdb_compact_cf = 'cf-zero';
|
||||
set @@global.rocksdb_compact_cf = 'cf-one';
|
||||
SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='cf-zero';
|
||||
cf_name
|
||||
cf-zero
|
||||
SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='cf-one';
|
||||
cf_name
|
||||
cf-one
|
||||
DROP TABLE t1;
|
||||
CREATE TABLE t1 (
|
||||
c1 INT,
|
||||
c2 INT,
|
||||
name VARCHAR(25) NOT NULL,
|
||||
event DATE,
|
||||
PRIMARY KEY (`c1`, `c2`) COMMENT 'custom_p0_cfname=foo;custom_p1_cfname=bar;custom_p2_cfname=baz'
|
||||
) ENGINE=ROCKSDB
|
||||
PARTITION BY LIST(c1) (
|
||||
PARTITION custom_p0 VALUES IN (1, 4, 7),
|
||||
PARTITION custom_p1 VALUES IN (2, 5, 8),
|
||||
PARTITION custom_p2 VALUES IN (3, 6, 9)
|
||||
);
|
||||
INSERT INTO t1 VALUES (1, 1, "one", null);
|
||||
INSERT INTO t1 VALUES (2, 2, "two", null);
|
||||
INSERT INTO t1 VALUES (3, 3, "three", null);
|
||||
INSERT INTO t1 VALUES (5, 5, "five", null);
|
||||
INSERT INTO t1 VALUES (9, 9, "nine", null);
|
||||
SELECT * FROM t1;
|
||||
c1 c2 name event
|
||||
1 1 one NULL
|
||||
2 2 two NULL
|
||||
5 5 five NULL
|
||||
3 3 three NULL
|
||||
9 9 nine NULL
|
||||
ALTER TABLE t1 DROP PRIMARY KEY;
|
||||
SELECT * FROM t1;
|
||||
c1 c2 name event
|
||||
1 1 one NULL
|
||||
2 2 two NULL
|
||||
5 5 five NULL
|
||||
3 3 three NULL
|
||||
9 9 nine NULL
|
||||
set @@global.rocksdb_compact_cf = 'foo';
|
||||
set @@global.rocksdb_compact_cf = 'bar';
|
||||
set @@global.rocksdb_compact_cf = 'baz';
|
||||
DROP TABLE t1;
|
||||
CREATE TABLE t1 (
|
||||
c1 INT,
|
||||
c2 INT,
|
||||
name VARCHAR(25) NOT NULL,
|
||||
event DATE,
|
||||
PRIMARY KEY (`c1`, `c2`) COMMENT 'custom_p0_cfname=foo;custom_p1_cfname=bar;custom_p2_cfname=baz'
|
||||
) ENGINE=ROCKSDB
|
||||
PARTITION BY LIST(c1) (
|
||||
PARTITION custom_p0 VALUES IN (1, 4, 7),
|
||||
PARTITION custom_p1 VALUES IN (2, 5, 8),
|
||||
PARTITION custom_p2 VALUES IN (3, 6, 9)
|
||||
);
|
||||
INSERT INTO t1 VALUES (1, 1, "one", null);
|
||||
INSERT INTO t1 VALUES (2, 2, "two", null);
|
||||
INSERT INTO t1 VALUES (3, 3, "three", null);
|
||||
INSERT INTO t1 VALUES (5, 5, "five", null);
|
||||
INSERT INTO t1 VALUES (9, 9, "nine", null);
|
||||
ALTER TABLE t1 DROP PRIMARY KEY;
|
||||
ALTER TABLE t1 ADD PRIMARY KEY (`c1`, `c2`) COMMENT 'custom_p0_cfname=p0_cf;custom_p1_cfname=p1_cf';
|
||||
set @@global.rocksdb_compact_cf = 'p0_cf';
|
||||
set @@global.rocksdb_compact_cf = 'p1_cf';
|
||||
SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='p0_cf';
|
||||
cf_name
|
||||
p0_cf
|
||||
SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='p1_cf';
|
||||
cf_name
|
||||
p1_cf
|
||||
DROP TABLE t1;
|
||||
CREATE TABLE t1 (
|
||||
c1 INT,
|
||||
c2 INT,
|
||||
name VARCHAR(25) NOT NULL,
|
||||
event DATE,
|
||||
PRIMARY KEY (`c1`, `c2`) COMMENT 'custom_p0_cfname=foo;custom_p1_cfname=bar;custom_p2_cfname=baz'
|
||||
) ENGINE=ROCKSDB
|
||||
PARTITION BY LIST(c1) (
|
||||
PARTITION custom_p0 VALUES IN (1, 4, 7),
|
||||
PARTITION custom_p1 VALUES IN (2, 5, 8),
|
||||
PARTITION custom_p2 VALUES IN (3, 6, 9)
|
||||
);
|
||||
INSERT INTO t1 VALUES (1, 1, "one", null);
|
||||
INSERT INTO t1 VALUES (2, 2, "two", null);
|
||||
INSERT INTO t1 VALUES (3, 3, "three", null);
|
||||
INSERT INTO t1 VALUES (5, 5, "five", null);
|
||||
INSERT INTO t1 VALUES (9, 9, "nine", null);
|
||||
ALTER TABLE t1 PARTITION BY LIST(c1) (
|
||||
PARTITION custom_p3 VALUES IN (1, 4, 7),
|
||||
PARTITION custom_p4 VALUES IN (2, 5, 8, 3, 6, 9)
|
||||
);
|
||||
ALTER TABLE t1 DROP PRIMARY KEY;
|
||||
ALTER TABLE t1 ADD PRIMARY KEY (`c1`, `c2`) COMMENT 'custom_p3_cfname=p3_cf;custom_p4_cfname=p4_cf';
|
||||
set @@global.rocksdb_compact_cf = 'p3_cf';
|
||||
set @@global.rocksdb_compact_cf = 'p4_cf';
|
||||
SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='p3_cf';
|
||||
cf_name
|
||||
p3_cf
|
||||
SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='p4_cf';
|
||||
cf_name
|
||||
p4_cf
|
||||
DROP TABLE t1;
|
||||
CREATE TABLE t1 (
|
||||
c1 INT,
|
||||
c2 INT,
|
||||
name VARCHAR(25) NOT NULL,
|
||||
event DATE,
|
||||
PRIMARY KEY (`c1`, `c2`) COMMENT 'custom_p0_cfname=foo;custom_p1_cfname=;'
|
||||
) ENGINE=ROCKSDB
|
||||
PARTITION BY LIST(c1) (
|
||||
PARTITION custom_p0 VALUES IN (1, 4, 7),
|
||||
PARTITION custom_p1 VALUES IN (2, 5, 8),
|
||||
PARTITION custom_p2 VALUES IN (3, 6, 9)
|
||||
);
|
||||
DROP TABLE t1;
|
||||
CREATE TABLE `t2` (
|
||||
`col1` bigint(20) NOT NULL,
|
||||
`col2` varbinary(64) NOT NULL,
|
||||
`col3` varbinary(256) NOT NULL,
|
||||
`col4` bigint(20) NOT NULL,
|
||||
`col5` mediumblob NOT NULL,
|
||||
PRIMARY KEY (`col1`,`col2`,`col3`)
|
||||
) ENGINE=ROCKSDB DEFAULT CHARSET=latin1
|
||||
PARTITION BY LIST COLUMNS (`col2`) (
|
||||
PARTITION custom_p0 VALUES IN (0x12345),
|
||||
PARTITION custom_p1 VALUES IN (0x23456),
|
||||
PARTITION custom_p2 VALUES IN (0x34567),
|
||||
PARTITION custom_p3 VALUES IN (0x45678),
|
||||
PARTITION custom_p4 VALUES IN (0x56789),
|
||||
PARTITION custom_p5 VALUES IN (0x6789A),
|
||||
PARTITION custom_p6 VALUES IN (0x789AB),
|
||||
PARTITION custom_p7 VALUES IN (0x89ABC)
|
||||
);
|
||||
DROP TABLE t2;
|
||||
CREATE TABLE `t2` (
|
||||
`col1` bigint(20) NOT NULL,
|
||||
`col2` varbinary(64) NOT NULL,
|
||||
`col3` varbinary(256) NOT NULL,
|
||||
`col4` bigint(20) NOT NULL,
|
||||
`col5` mediumblob NOT NULL,
|
||||
PRIMARY KEY (`col1`,`col2`,`col3`) COMMENT 'custom_p0_cfname=my_cf0;custom_p1_cfname=my_cf1'
|
||||
) ENGINE=ROCKSDB DEFAULT CHARSET=latin1
|
||||
PARTITION BY LIST COLUMNS (`col2`) (
|
||||
PARTITION custom_p0 VALUES IN (0x12345),
|
||||
PARTITION custom_p1 VALUES IN (0x23456),
|
||||
PARTITION custom_p2 VALUES IN (0x34567),
|
||||
PARTITION custom_p3 VALUES IN (0x45678),
|
||||
PARTITION custom_p4 VALUES IN (0x56789),
|
||||
PARTITION custom_p5 VALUES IN (0x6789A),
|
||||
PARTITION custom_p6 VALUES IN (0x789AB),
|
||||
PARTITION custom_p7 VALUES IN (0x89ABC)
|
||||
);
|
||||
set @@global.rocksdb_compact_cf = 'my_cf0';
|
||||
set @@global.rocksdb_compact_cf = 'my_cf1';
|
||||
SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='my_cf0';
|
||||
cf_name
|
||||
my_cf0
|
||||
SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='my_cf1';
|
||||
cf_name
|
||||
my_cf1
|
||||
INSERT INTO t2 VALUES (100, 0x12345, 0x1, 1, 0x2);
|
||||
INSERT INTO t2 VALUES (200, 0x12345, 0x1, 1, 0x2);
|
||||
INSERT INTO t2 VALUES (300, 0x12345, 0x1, 1, 0x2);
|
||||
INSERT INTO t2 VALUES (100, 0x23456, 0x2, 1, 0x3);
|
||||
INSERT INTO t2 VALUES (100, 0x34567, 0x4, 1, 0x5);
|
||||
INSERT INTO t2 VALUES (400, 0x89ABC, 0x4, 1, 0x5);
|
||||
SELECT col1, HEX(col2), HEX(col3), col4, HEX(col5) FROM t2;
|
||||
col1 HEX(col2) HEX(col3) col4 HEX(col5)
|
||||
100 012345 01 1 02
|
||||
200 012345 01 1 02
|
||||
300 012345 01 1 02
|
||||
100 023456 02 1 03
|
||||
100 034567 04 1 05
|
||||
400 089ABC 04 1 05
|
||||
EXPLAIN PARTITIONS SELECT HEX(col2) FROM t2 where col2 = 0x12345;
|
||||
id select_type table partitions type possible_keys key key_len ref rows Extra
|
||||
1 SIMPLE t2 custom_p0 index NULL PRIMARY 332 NULL 3 Using where; Using index
|
||||
EXPLAIN PARTITIONS SELECT HEX(col2) FROM t2 where col2 = 0x23456;
|
||||
id select_type table partitions type possible_keys key key_len ref rows Extra
|
||||
1 SIMPLE t2 custom_p1 index NULL PRIMARY 332 NULL 2 Using where; Using index
|
||||
ALTER TABLE t2 DROP PRIMARY KEY;
|
||||
ALTER TABLE t2 ADD PRIMARY KEY (`col1`,`col2`,`col3`) COMMENT 'custom_p0_cfname=new_cf0;custom_p1_cfname=new_cf1';
|
||||
set @@global.rocksdb_compact_cf = 'new_cf0';
|
||||
set @@global.rocksdb_compact_cf = 'new_cf1';
|
||||
SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='new_cf0';
|
||||
cf_name
|
||||
new_cf0
|
||||
SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='new_cf1';
|
||||
cf_name
|
||||
new_cf1
|
||||
INSERT INTO t2 VALUES (500, 0x12345, 0x5, 1, 0x2);
|
||||
INSERT INTO t2 VALUES (700, 0x23456, 0x7, 1, 0x3);
|
||||
EXPLAIN PARTITIONS SELECT HEX(col2) FROM t2 where col2 = 0x12345;
|
||||
id select_type table partitions type possible_keys key key_len ref rows Extra
|
||||
1 SIMPLE t2 custom_p0 index NULL PRIMARY 332 NULL 4 Using where; Using index
|
||||
EXPLAIN PARTITIONS SELECT HEX(col2) FROM t2 where col2 = 0x23456;
|
||||
id select_type table partitions type possible_keys key key_len ref rows Extra
|
||||
1 SIMPLE t2 custom_p1 index NULL PRIMARY 332 NULL 2 Using where; Using index
|
||||
SELECT col1, HEX(col2), HEX(col3), col4, HEX(col5) FROM t2;
|
||||
col1 HEX(col2) HEX(col3) col4 HEX(col5)
|
||||
100 012345 01 1 02
|
||||
200 012345 01 1 02
|
||||
300 012345 01 1 02
|
||||
500 012345 05 1 02
|
||||
100 023456 02 1 03
|
||||
700 023456 07 1 03
|
||||
100 034567 04 1 05
|
||||
400 089ABC 04 1 05
|
||||
DROP TABLE t2;
|
||||
CREATE TABLE `t2` (
|
||||
`col1` bigint(20) NOT NULL,
|
||||
`col2` varbinary(64) NOT NULL,
|
||||
`col3` varbinary(256) NOT NULL,
|
||||
`col4` bigint(20) NOT NULL,
|
||||
`col5` mediumblob NOT NULL,
|
||||
PRIMARY KEY (`col1`,`col2`,`col3`) COMMENT 'custom_p0_cfname=test_cf0;custom_p1_cfname=test_cf1',
|
||||
KEY (`col2`, `col4`) COMMENT 'custom_p5_cfname=test_cf5'
|
||||
) ENGINE=ROCKSDB DEFAULT CHARSET=latin1
|
||||
PARTITION BY LIST COLUMNS (`col2`) (
|
||||
PARTITION custom_p0 VALUES IN (0x12345),
|
||||
PARTITION custom_p1 VALUES IN (0x23456),
|
||||
PARTITION custom_p2 VALUES IN (0x34567),
|
||||
PARTITION custom_p3 VALUES IN (0x45678),
|
||||
PARTITION custom_p4 VALUES IN (0x56789),
|
||||
PARTITION custom_p5 VALUES IN (0x6789A),
|
||||
PARTITION custom_p6 VALUES IN (0x789AB),
|
||||
PARTITION custom_p7 VALUES IN (0x89ABC)
|
||||
);
|
||||
SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='test_cf0';
|
||||
cf_name
|
||||
test_cf0
|
||||
SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='test_cf1';
|
||||
cf_name
|
||||
test_cf1
|
||||
SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='test_cf5';
|
||||
cf_name
|
||||
test_cf5
|
||||
INSERT INTO t2 VALUES (100, 0x12345, 0x1, 1, 0x2);
|
||||
INSERT INTO t2 VALUES (200, 0x12345, 0x1, 1, 0x2);
|
||||
INSERT INTO t2 VALUES (300, 0x12345, 0x1, 1, 0x2);
|
||||
INSERT INTO t2 VALUES (100, 0x23456, 0x2, 1, 0x3);
|
||||
INSERT INTO t2 VALUES (100, 0x34567, 0x4, 1, 0x5);
|
||||
INSERT INTO t2 VALUES (400, 0x89ABC, 0x4, 1, 0x5);
|
||||
INSERT INTO t2 VALUES (500, 0x6789A, 0x5, 1, 0x7);
|
||||
EXPLAIN PARTITIONS SELECT * FROM t2 WHERE col2 = 0x6789A AND col4 = 1;
|
||||
id select_type table partitions type possible_keys key key_len ref rows Extra
|
||||
1 SIMPLE t2 custom_p5 ref col2 col2 74 const,const 1 Using where
|
||||
ALTER TABLE t2 DROP KEY `col2`;
|
||||
ALTER TABLE t2 ADD KEY (`col3`, `col4`) COMMENT 'custom_p5_cfname=another_cf_for_p5';
|
||||
SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='another_cf_for_p5';
|
||||
cf_name
|
||||
another_cf_for_p5
|
||||
EXPLAIN PARTITIONS SELECT * FROM t2 WHERE col3 = 0x4 AND col2 = 0x34567;
|
||||
id select_type table partitions type possible_keys key key_len ref rows Extra
|
||||
1 SIMPLE t2 custom_p2 ref col3 col3 258 const 1 Using where
|
||||
DROP TABLE t2;
|
||||
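# Secondary indexes take the same per-partition cfname comment syntax as the
# primary key: the ALTER ... ADD KEY above places the custom_p5 index data in the
# newly named column family, which the rocksdb_cfstats lookup confirms. A
# hypothetical sketch of the same operation:
# ALTER TABLE some_table ADD KEY (col_a, col_b) COMMENT 'custom_p5_cfname=some_new_cf';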
CREATE TABLE `t2` (
|
||||
`col1` bigint(20) NOT NULL,
|
||||
`col2` varbinary(64) NOT NULL,
|
||||
`col3` varbinary(256) NOT NULL,
|
||||
`col4` bigint(20) NOT NULL,
|
||||
`col5` mediumblob NOT NULL,
|
||||
PRIMARY KEY (`col1`,`col2`,`col3`) COMMENT 'custom_p0_cfname=test_cf0;custom_p1_cfname=test_cf1',
|
||||
UNIQUE KEY (`col2`, `col4`) COMMENT 'custom_p5_cfname=unique_test_cf5'
|
||||
) ENGINE=ROCKSDB DEFAULT CHARSET=latin1
|
||||
PARTITION BY LIST COLUMNS (`col2`) (
|
||||
PARTITION custom_p0 VALUES IN (0x12345),
|
||||
PARTITION custom_p1 VALUES IN (0x23456),
|
||||
PARTITION custom_p2 VALUES IN (0x34567),
|
||||
PARTITION custom_p3 VALUES IN (0x45678),
|
||||
PARTITION custom_p4 VALUES IN (0x56789),
|
||||
PARTITION custom_p5 VALUES IN (0x6789A),
|
||||
PARTITION custom_p6 VALUES IN (0x789AB),
|
||||
PARTITION custom_p7 VALUES IN (0x89ABC)
|
||||
);
|
||||
SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='unique_test_cf5';
|
||||
cf_name
|
||||
unique_test_cf5
|
||||
INSERT INTO t2 VALUES (100, 0x12345, 0x1, 1, 0x2);
|
||||
INSERT INTO t2 VALUES (200, 0x12345, 0x1, 1, 0x2);
|
||||
ERROR 23000: Duplicate entry '\x01#E-1' for key 'col2'
|
||||
INSERT INTO t2 VALUES (300, 0x12345, 0x1, 1, 0x2);
|
||||
ERROR 23000: Duplicate entry '\x01#E-1' for key 'col2'
|
||||
INSERT INTO t2 VALUES (100, 0x23456, 0x2, 1, 0x3);
|
||||
INSERT INTO t2 VALUES (100, 0x34567, 0x4, 1, 0x5);
|
||||
INSERT INTO t2 VALUES (400, 0x89ABC, 0x4, 1, 0x5);
|
||||
INSERT INTO t2 VALUES (500, 0x6789A, 0x5, 1, 0x7);
|
||||
DROP TABLE t2;
|
||||
CREATE TABLE t1 (
|
||||
`a` int,
|
||||
PRIMARY KEY (a) COMMENT "sharedcf"
|
||||
) ENGINE=ROCKSDB;
|
||||
SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='sharedcf';
|
||||
cf_name
|
||||
sharedcf
|
||||
CREATE TABLE t2 (
|
||||
`a` INT,
|
||||
`b` DATE,
|
||||
`c` VARCHAR(42),
|
||||
PRIMARY KEY (`a`) COMMENT "custom_p0_cfname=sharedcf;custom_p2_cfname=notsharedcf"
|
||||
) ENGINE=ROCKSDB
|
||||
PARTITION BY LIST(`a`) (
|
||||
PARTITION custom_p0 VALUES IN (1, 4, 7),
|
||||
PARTITION custom_p1 VALUES IN (2, 5, 8),
|
||||
PARTITION custom_p2 VALUES IN (3, 6, 9)
|
||||
);
|
||||
SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='notsharedcf';
|
||||
cf_name
|
||||
notsharedcf
|
||||
DROP TABLE IF EXISTS t1;
|
||||
DROP TABLE IF EXISTS t2;
|
@@ -12,7 +12,6 @@ Type Name Status
|
||||
DBSTATS rocksdb #
|
||||
CF_COMPACTION __system__ #
|
||||
CF_COMPACTION cf_t1 #
|
||||
CF_COMPACTION cf_t4 #
|
||||
CF_COMPACTION default #
|
||||
CF_COMPACTION rev:cf_t2 #
|
||||
Memory_Stats rocksdb #
|
||||
@@ -48,15 +47,6 @@ cf_t1 NUM_ENTRIES_ACTIVE_MEM_TABLE #
|
||||
cf_t1 NUM_ENTRIES_IMM_MEM_TABLES #
|
||||
cf_t1 NON_BLOCK_CACHE_SST_MEM_USAGE #
|
||||
cf_t1 NUM_LIVE_VERSIONS #
|
||||
cf_t4 NUM_IMMUTABLE_MEM_TABLE #
|
||||
cf_t4 MEM_TABLE_FLUSH_PENDING #
|
||||
cf_t4 COMPACTION_PENDING #
|
||||
cf_t4 CUR_SIZE_ACTIVE_MEM_TABLE #
|
||||
cf_t4 CUR_SIZE_ALL_MEM_TABLES #
|
||||
cf_t4 NUM_ENTRIES_ACTIVE_MEM_TABLE #
|
||||
cf_t4 NUM_ENTRIES_IMM_MEM_TABLES #
|
||||
cf_t4 NON_BLOCK_CACHE_SST_MEM_USAGE #
|
||||
cf_t4 NUM_LIVE_VERSIONS #
|
||||
default NUM_IMMUTABLE_MEM_TABLE #
|
||||
default MEM_TABLE_FLUSH_PENDING #
|
||||
default COMPACTION_PENDING #
|
||||
@@ -117,7 +107,6 @@ __system__ RATE_LIMIT_DELAY_MAX_MILLISECONDS #
|
||||
__system__ ARENA_BLOCK_SIZE #
|
||||
__system__ DISABLE_AUTO_COMPACTIONS #
|
||||
__system__ PURGE_REDUNDANT_KVS_WHILE_FLUSH #
|
||||
__system__ VERIFY_CHECKSUM_IN_COMPACTION #
|
||||
__system__ MAX_SEQUENTIAL_SKIP_IN_ITERATIONS #
|
||||
__system__ MEMTABLE_FACTORY #
|
||||
__system__ INPLACE_UPDATE_SUPPORT #
|
||||
@@ -126,7 +115,6 @@ __system__ MEMTABLE_PREFIX_BLOOM_BITS_RATIO #
|
||||
__system__ MEMTABLE_PREFIX_BLOOM_HUGE_PAGE_TLB_SIZE #
|
||||
__system__ BLOOM_LOCALITY #
|
||||
__system__ MAX_SUCCESSIVE_MERGES #
|
||||
__system__ MIN_PARTIAL_MERGE_OPERANDS #
|
||||
__system__ OPTIMIZE_FILTERS_FOR_HITS #
|
||||
__system__ MAX_BYTES_FOR_LEVEL_MULTIPLIER_ADDITIONAL #
|
||||
__system__ COMPRESSION_TYPE #
|
||||
@@ -173,7 +161,6 @@ cf_t1 RATE_LIMIT_DELAY_MAX_MILLISECONDS #
|
||||
cf_t1 ARENA_BLOCK_SIZE #
|
||||
cf_t1 DISABLE_AUTO_COMPACTIONS #
|
||||
cf_t1 PURGE_REDUNDANT_KVS_WHILE_FLUSH #
|
||||
cf_t1 VERIFY_CHECKSUM_IN_COMPACTION #
|
||||
cf_t1 MAX_SEQUENTIAL_SKIP_IN_ITERATIONS #
|
||||
cf_t1 MEMTABLE_FACTORY #
|
||||
cf_t1 INPLACE_UPDATE_SUPPORT #
|
||||
@@ -182,7 +169,6 @@ cf_t1 MEMTABLE_PREFIX_BLOOM_BITS_RATIO #
|
||||
cf_t1 MEMTABLE_PREFIX_BLOOM_HUGE_PAGE_TLB_SIZE #
|
||||
cf_t1 BLOOM_LOCALITY #
|
||||
cf_t1 MAX_SUCCESSIVE_MERGES #
|
||||
cf_t1 MIN_PARTIAL_MERGE_OPERANDS #
|
||||
cf_t1 OPTIMIZE_FILTERS_FOR_HITS #
|
||||
cf_t1 MAX_BYTES_FOR_LEVEL_MULTIPLIER_ADDITIONAL #
|
||||
cf_t1 COMPRESSION_TYPE #
|
||||
@@ -206,62 +192,6 @@ cf_t1 BLOCK_BASED_TABLE_FACTORY::BLOCK_SIZE #
|
||||
cf_t1 BLOCK_BASED_TABLE_FACTORY::BLOCK_SIZE_DEVIATION #
|
||||
cf_t1 BLOCK_BASED_TABLE_FACTORY::BLOCK_RESTART_INTERVAL #
|
||||
cf_t1 BLOCK_BASED_TABLE_FACTORY::FORMAT_VERSION #
|
||||
cf_t4 COMPARATOR #
|
||||
cf_t4 MERGE_OPERATOR #
|
||||
cf_t4 COMPACTION_FILTER #
|
||||
cf_t4 COMPACTION_FILTER_FACTORY #
|
||||
cf_t4 WRITE_BUFFER_SIZE #
|
||||
cf_t4 MAX_WRITE_BUFFER_NUMBER #
|
||||
cf_t4 MIN_WRITE_BUFFER_NUMBER_TO_MERGE #
|
||||
cf_t4 NUM_LEVELS #
|
||||
cf_t4 LEVEL0_FILE_NUM_COMPACTION_TRIGGER #
|
||||
cf_t4 LEVEL0_SLOWDOWN_WRITES_TRIGGER #
|
||||
cf_t4 LEVEL0_STOP_WRITES_TRIGGER #
|
||||
cf_t4 MAX_MEM_COMPACTION_LEVEL #
|
||||
cf_t4 TARGET_FILE_SIZE_BASE #
|
||||
cf_t4 TARGET_FILE_SIZE_MULTIPLIER #
|
||||
cf_t4 MAX_BYTES_FOR_LEVEL_BASE #
|
||||
cf_t4 LEVEL_COMPACTION_DYNAMIC_LEVEL_BYTES #
|
||||
cf_t4 MAX_BYTES_FOR_LEVEL_MULTIPLIER #
|
||||
cf_t4 SOFT_RATE_LIMIT #
|
||||
cf_t4 HARD_RATE_LIMIT #
|
||||
cf_t4 RATE_LIMIT_DELAY_MAX_MILLISECONDS #
|
||||
cf_t4 ARENA_BLOCK_SIZE #
|
||||
cf_t4 DISABLE_AUTO_COMPACTIONS #
|
||||
cf_t4 PURGE_REDUNDANT_KVS_WHILE_FLUSH #
|
||||
cf_t4 VERIFY_CHECKSUM_IN_COMPACTION #
|
||||
cf_t4 MAX_SEQUENTIAL_SKIP_IN_ITERATIONS #
|
||||
cf_t4 MEMTABLE_FACTORY #
|
||||
cf_t4 INPLACE_UPDATE_SUPPORT #
|
||||
cf_t4 INPLACE_UPDATE_NUM_LOCKS #
|
||||
cf_t4 MEMTABLE_PREFIX_BLOOM_BITS_RATIO #
|
||||
cf_t4 MEMTABLE_PREFIX_BLOOM_HUGE_PAGE_TLB_SIZE #
|
||||
cf_t4 BLOOM_LOCALITY #
|
||||
cf_t4 MAX_SUCCESSIVE_MERGES #
|
||||
cf_t4 MIN_PARTIAL_MERGE_OPERANDS #
|
||||
cf_t4 OPTIMIZE_FILTERS_FOR_HITS #
|
||||
cf_t4 MAX_BYTES_FOR_LEVEL_MULTIPLIER_ADDITIONAL #
|
||||
cf_t4 COMPRESSION_TYPE #
|
||||
cf_t4 COMPRESSION_PER_LEVEL #
|
||||
cf_t4 COMPRESSION_OPTS #
|
||||
cf_t4 BOTTOMMOST_COMPRESSION #
|
||||
cf_t4 PREFIX_EXTRACTOR #
|
||||
cf_t4 COMPACTION_STYLE #
|
||||
cf_t4 COMPACTION_OPTIONS_UNIVERSAL #
|
||||
cf_t4 COMPACTION_OPTION_FIFO::MAX_TABLE_FILES_SIZE #
|
||||
cf_t4 BLOCK_BASED_TABLE_FACTORY::CACHE_INDEX_AND_FILTER_BLOCKS #
|
||||
cf_t4 BLOCK_BASED_TABLE_FACTORY::INDEX_TYPE #
|
||||
cf_t4 BLOCK_BASED_TABLE_FACTORY::HASH_INDEX_ALLOW_COLLISION #
|
||||
cf_t4 BLOCK_BASED_TABLE_FACTORY::CHECKSUM #
|
||||
cf_t4 BLOCK_BASED_TABLE_FACTORY::NO_BLOCK_CACHE #
|
||||
cf_t4 BLOCK_BASED_TABLE_FACTORY::FILTER_POLICY #
|
||||
cf_t4 BLOCK_BASED_TABLE_FACTORY::WHOLE_KEY_FILTERING #
|
||||
cf_t4 BLOCK_BASED_TABLE_FACTORY::BLOCK_CACHE #
|
||||
cf_t4 BLOCK_BASED_TABLE_FACTORY::BLOCK_CACHE_COMPRESSED #
|
||||
cf_t4 BLOCK_BASED_TABLE_FACTORY::BLOCK_SIZE #
|
||||
cf_t4 BLOCK_BASED_TABLE_FACTORY::BLOCK_SIZE_DEVIATION #
|
||||
cf_t4 BLOCK_BASED_TABLE_FACTORY::BLOCK_RESTART_INTERVAL #
|
||||
cf_t4 BLOCK_BASED_TABLE_FACTORY::FORMAT_VERSION #
|
||||
default COMPARATOR #
|
||||
default MERGE_OPERATOR #
|
||||
default COMPACTION_FILTER #
|
||||
@@ -285,7 +215,6 @@ default RATE_LIMIT_DELAY_MAX_MILLISECONDS #
|
||||
default ARENA_BLOCK_SIZE #
|
||||
default DISABLE_AUTO_COMPACTIONS #
|
||||
default PURGE_REDUNDANT_KVS_WHILE_FLUSH #
|
||||
default VERIFY_CHECKSUM_IN_COMPACTION #
|
||||
default MAX_SEQUENTIAL_SKIP_IN_ITERATIONS #
|
||||
default MEMTABLE_FACTORY #
|
||||
default INPLACE_UPDATE_SUPPORT #
|
||||
@@ -294,7 +223,6 @@ default MEMTABLE_PREFIX_BLOOM_BITS_RATIO #
|
||||
default MEMTABLE_PREFIX_BLOOM_HUGE_PAGE_TLB_SIZE #
|
||||
default BLOOM_LOCALITY #
|
||||
default MAX_SUCCESSIVE_MERGES #
|
||||
default MIN_PARTIAL_MERGE_OPERANDS #
|
||||
default OPTIMIZE_FILTERS_FOR_HITS #
|
||||
default MAX_BYTES_FOR_LEVEL_MULTIPLIER_ADDITIONAL #
|
||||
default COMPRESSION_TYPE #
|
||||
@@ -341,7 +269,6 @@ rev:cf_t2 RATE_LIMIT_DELAY_MAX_MILLISECONDS #
|
||||
rev:cf_t2 ARENA_BLOCK_SIZE #
|
||||
rev:cf_t2 DISABLE_AUTO_COMPACTIONS #
|
||||
rev:cf_t2 PURGE_REDUNDANT_KVS_WHILE_FLUSH #
|
||||
rev:cf_t2 VERIFY_CHECKSUM_IN_COMPACTION #
|
||||
rev:cf_t2 MAX_SEQUENTIAL_SKIP_IN_ITERATIONS #
|
||||
rev:cf_t2 MEMTABLE_FACTORY #
|
||||
rev:cf_t2 INPLACE_UPDATE_SUPPORT #
|
||||
@@ -350,7 +277,6 @@ rev:cf_t2 MEMTABLE_PREFIX_BLOOM_BITS_RATIO #
|
||||
rev:cf_t2 MEMTABLE_PREFIX_BLOOM_HUGE_PAGE_TLB_SIZE #
|
||||
rev:cf_t2 BLOOM_LOCALITY #
|
||||
rev:cf_t2 MAX_SUCCESSIVE_MERGES #
|
||||
rev:cf_t2 MIN_PARTIAL_MERGE_OPERANDS #
|
||||
rev:cf_t2 OPTIMIZE_FILTERS_FOR_HITS #
|
||||
rev:cf_t2 MAX_BYTES_FOR_LEVEL_MULTIPLIER_ADDITIONAL #
|
||||
rev:cf_t2 COMPRESSION_TYPE #
|
||||
|
@@ -1,20 +1,23 @@
|
||||
DROP TABLE IF EXISTS t1;
|
||||
CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb DATA DIRECTORY = '<DATA_DIR>' INDEX DIRECTORY = '<INDEX_DIR>';
|
||||
SHOW CREATE TABLE t1;
|
||||
Table Create Table
|
||||
t1 CREATE TABLE `t1` (
|
||||
`a` int(11) NOT NULL,
|
||||
`b` char(8) DEFAULT NULL,
|
||||
PRIMARY KEY (`a`)
|
||||
) ENGINE=ROCKSDB DEFAULT CHARSET=latin1
|
||||
ALTER TABLE t1 INDEX DIRECTORY = '<DATA_DIR>';
|
||||
Warnings:
|
||||
Warning 1618 <INDEX DIRECTORY> option ignored
|
||||
SHOW CREATE TABLE t1;
|
||||
Table Create Table
|
||||
t1 CREATE TABLE `t1` (
|
||||
`a` int(11) NOT NULL,
|
||||
`b` char(8) DEFAULT NULL,
|
||||
PRIMARY KEY (`a`)
|
||||
) ENGINE=ROCKSDB DEFAULT CHARSET=latin1
|
||||
DROP TABLE t1;
|
||||
CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb DATA DIRECTORY = '/foo/bar/data';
|
||||
ERROR HY000: Got error 197 'Specifying DATA DIRECTORY for an individual table is not supported.' from ROCKSDB
|
||||
CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb INDEX DIRECTORY = '/foo/bar/index';
|
||||
ERROR HY000: Got error 198 'Specifying INDEX DIRECTORY for an individual table is not supported.' from ROCKSDB
|
||||
CREATE TABLE t1 (id INT NOT NULL PRIMARY KEY) ENGINE=rocksdb PARTITION BY RANGE (id)
|
||||
(
|
||||
PARTITION P0 VALUES LESS THAN (1000)
|
||||
DATA DIRECTORY = '/foo/bar/data/',
|
||||
PARTITION P1 VALUES LESS THAN (2000)
|
||||
DATA DIRECTORY = '/foo/bar/data/',
|
||||
PARTITION P2 VALUES LESS THAN (MAXVALUE)
|
||||
);
|
||||
ERROR HY000: Got error 197 'Specifying DATA DIRECTORY for an individual table is not supported.' from ROCKSDB
|
||||
CREATE TABLE t1 (id int not null primary key) ENGINE=rocksdb PARTITION BY RANGE (id)
|
||||
(
|
||||
PARTITION P0 VALUES LESS THAN (1000)
|
||||
INDEX DIRECTORY = '/foo/bar/data/',
|
||||
PARTITION P1 VALUES LESS THAN (2000)
|
||||
INDEX DIRECTORY = '/foo/bar/data/',
|
||||
PARTITION P2 VALUES LESS THAN (MAXVALUE)
|
||||
);
|
||||
ERROR HY000: Got error 198 'Specifying INDEX DIRECTORY for an individual table is not supported.' from ROCKSDB
|
||||
|
@@ -26,23 +26,23 @@ value
|
||||
select * from t1 where value2=5;
|
||||
id value value2
|
||||
5 5 5
|
||||
select * from t1 where id < 3;
|
||||
select * from t1 where id < 3 order by id;
|
||||
id value value2
|
||||
1 1 1
|
||||
2 2 2
|
||||
select * from t1 where value < 3;
|
||||
select * from t1 where value < 3 order by id;
|
||||
id value value2
|
||||
1 1 1
|
||||
2 2 2
|
||||
select value from t1 where value < 3;
|
||||
select value from t1 where value < 3 order by id;
|
||||
value
|
||||
1
|
||||
2
|
||||
select * from t1 where value2 < 3;
|
||||
select * from t1 where value2 < 3 order by id;
|
||||
id value value2
|
||||
1 1 1
|
||||
2 2 2
|
||||
select * from t1;
|
||||
select * from t1 order by id;
|
||||
id value value2
|
||||
1 1 1
|
||||
2 2 2
|
||||
@@ -54,7 +54,7 @@ id value value2
|
||||
9 9 9
|
||||
10 10 10
|
||||
11 11 11
|
||||
select value from t1;
|
||||
select value from t1 order by id;
|
||||
value
|
||||
1
|
||||
2
|
||||
@@ -93,23 +93,23 @@ value
|
||||
select * from t1 where value2=5;
|
||||
id value value2
|
||||
5 5 5
|
||||
select * from t1 where id < 3;
|
||||
select * from t1 where id < 3 order by id;
|
||||
id value value2
|
||||
1 1 1
|
||||
2 2 2
|
||||
select * from t1 where value < 3;
|
||||
select * from t1 where value < 3 order by id;
|
||||
id value value2
|
||||
1 1 1
|
||||
2 2 2
|
||||
select value from t1 where value < 3;
|
||||
select value from t1 where value < 3 order by id;
|
||||
value
|
||||
1
|
||||
2
|
||||
select * from t1 where value2 < 3;
|
||||
select * from t1 where value2 < 3 order by id;
|
||||
id value value2
|
||||
1 1 1
|
||||
2 2 2
|
||||
select * from t1;
|
||||
select * from t1 order by id;
|
||||
id value value2
|
||||
1 1 1
|
||||
2 2 2
|
||||
@@ -121,7 +121,7 @@ id value value2
|
||||
8 8 8
|
||||
9 9 9
|
||||
10 10 10
|
||||
select value from t1;
|
||||
select value from t1 order by id;
|
||||
value
|
||||
1
|
||||
2
|
||||
@@ -159,22 +159,22 @@ value
|
||||
select * from t1 where value2=5;
|
||||
id value value2
|
||||
5 5 5
|
||||
select * from t1 where id < 3;
|
||||
select * from t1 where id < 3 order by id;
|
||||
id value value2
|
||||
1 1 100
|
||||
2 2 2
|
||||
select * from t1 where value < 3;
|
||||
select * from t1 where value < 3 order by id;
|
||||
id value value2
|
||||
1 1 100
|
||||
2 2 2
|
||||
select value from t1 where value < 3;
|
||||
select value from t1 where value < 3 order by id;
|
||||
value
|
||||
1
|
||||
2
|
||||
select * from t1 where value2 < 3;
|
||||
select * from t1 where value2 < 3 order by id;
|
||||
id value value2
|
||||
2 2 2
|
||||
select * from t1;
|
||||
select * from t1 order by id;
|
||||
id value value2
|
||||
1 1 100
|
||||
2 2 2
|
||||
@@ -185,7 +185,7 @@ id value value2
|
||||
8 8 8
|
||||
9 9 9
|
||||
10 10 10
|
||||
select value from t1;
|
||||
select value from t1 order by id;
|
||||
value
|
||||
1
|
||||
2
|
||||
@@ -221,21 +221,21 @@ value
|
||||
select * from t1 where value2=5;
|
||||
id value value2
|
||||
5 5 5
|
||||
select * from t1 where id < 3;
|
||||
select * from t1 where id < 3 order by id;
|
||||
id value value2
|
||||
1 100 1
|
||||
2 2 2
|
||||
select * from t1 where value < 3;
|
||||
select * from t1 where value < 3 order by id;
|
||||
id value value2
|
||||
2 2 2
|
||||
select value from t1 where value < 3;
|
||||
select value from t1 where value < 3 order by id;
|
||||
value
|
||||
2
|
||||
select * from t1 where value2 < 3;
|
||||
select * from t1 where value2 < 3 order by id;
|
||||
id value value2
|
||||
1 100 1
|
||||
2 2 2
|
||||
select * from t1;
|
||||
select * from t1 order by id;
|
||||
id value value2
|
||||
1 100 1
|
||||
2 2 2
|
||||
@@ -246,8 +246,9 @@ id value value2
|
||||
8 8 8
|
||||
9 9 9
|
||||
10 10 10
|
||||
select value from t1;
|
||||
select value from t1 order by id;
|
||||
value
|
||||
100
|
||||
2
|
||||
3
|
||||
4
|
||||
@@ -256,7 +257,6 @@ value
|
||||
8
|
||||
9
|
||||
10
|
||||
100
|
||||
rollback;
|
||||
begin;
|
||||
update t1 set id=100 where id=1;
|
||||
@@ -283,22 +283,22 @@ value
|
||||
select * from t1 where value2=5;
|
||||
id value value2
|
||||
5 5 5
|
||||
select * from t1 where id < 3;
|
||||
select * from t1 where id < 3 order by id;
|
||||
id value value2
|
||||
2 2 2
|
||||
select * from t1 where value < 3;
|
||||
select * from t1 where value < 3 order by id;
|
||||
id value value2
|
||||
2 2 2
|
||||
100 1 1
|
||||
2 2 2
|
||||
select value from t1 where value < 3;
|
||||
select value from t1 where value < 3 order by id;
|
||||
value
|
||||
1
|
||||
2
|
||||
select * from t1 where value2 < 3;
|
||||
1
|
||||
select * from t1 where value2 < 3 order by id;
|
||||
id value value2
|
||||
2 2 2
|
||||
100 1 1
|
||||
select * from t1;
|
||||
select * from t1 order by id;
|
||||
id value value2
|
||||
2 2 2
|
||||
3 3 3
|
||||
@@ -309,9 +309,8 @@ id value value2
|
||||
9 9 9
|
||||
10 10 10
|
||||
100 1 1
|
||||
select value from t1;
|
||||
select value from t1 order by id;
|
||||
value
|
||||
1
|
||||
2
|
||||
3
|
||||
4
|
||||
@@ -320,6 +319,7 @@ value
|
||||
8
|
||||
9
|
||||
10
|
||||
1
|
||||
rollback;
|
||||
begin;
|
||||
update t1 set value2=100 where value=1;
|
||||
@@ -346,22 +346,22 @@ value
|
||||
select * from t1 where value2=5;
|
||||
id value value2
|
||||
5 5 5
|
||||
select * from t1 where id < 3;
|
||||
select * from t1 where id < 3 order by id;
|
||||
id value value2
|
||||
1 1 100
|
||||
2 2 2
|
||||
select * from t1 where value < 3;
|
||||
select * from t1 where value < 3 order by id;
|
||||
id value value2
|
||||
1 1 100
|
||||
2 2 2
|
||||
select value from t1 where value < 3;
|
||||
select value from t1 where value < 3 order by id;
|
||||
value
|
||||
1
|
||||
2
|
||||
select * from t1 where value2 < 3;
|
||||
select * from t1 where value2 < 3 order by id;
|
||||
id value value2
|
||||
2 2 2
|
||||
select * from t1;
|
||||
select * from t1 order by id;
|
||||
id value value2
|
||||
1 1 100
|
||||
2 2 2
|
||||
@@ -372,7 +372,7 @@ id value value2
|
||||
8 8 8
|
||||
9 9 9
|
||||
10 10 10
|
||||
select value from t1;
|
||||
select value from t1 order by id;
|
||||
value
|
||||
1
|
||||
2
|
||||
@@ -408,21 +408,21 @@ value
|
||||
select * from t1 where value2=5;
|
||||
id value value2
|
||||
5 5 5
|
||||
select * from t1 where id < 3;
|
||||
select * from t1 where id < 3 order by id;
|
||||
id value value2
|
||||
1 100 1
|
||||
2 2 2
|
||||
select * from t1 where value < 3;
|
||||
select * from t1 where value < 3 order by id;
|
||||
id value value2
|
||||
2 2 2
|
||||
select value from t1 where value < 3;
|
||||
select value from t1 where value < 3 order by id;
|
||||
value
|
||||
2
|
||||
select * from t1 where value2 < 3;
|
||||
select * from t1 where value2 < 3 order by id;
|
||||
id value value2
|
||||
1 100 1
|
||||
2 2 2
|
||||
select * from t1;
|
||||
select * from t1 order by id;
|
||||
id value value2
|
||||
1 100 1
|
||||
2 2 2
|
||||
@@ -433,8 +433,9 @@ id value value2
|
||||
8 8 8
|
||||
9 9 9
|
||||
10 10 10
|
||||
select value from t1;
|
||||
select value from t1 order by id;
|
||||
value
|
||||
100
|
||||
2
|
||||
3
|
||||
4
|
||||
@@ -443,7 +444,6 @@ value
|
||||
8
|
||||
9
|
||||
10
|
||||
100
|
||||
rollback;
|
||||
begin;
|
||||
update t1 set id=100 where value=1;
|
||||
@@ -470,22 +470,22 @@ value
|
||||
select * from t1 where value2=5;
|
||||
id value value2
|
||||
5 5 5
|
||||
select * from t1 where id < 3;
|
||||
select * from t1 where id < 3 order by id;
|
||||
id value value2
|
||||
2 2 2
|
||||
select * from t1 where value < 3;
|
||||
select * from t1 where value < 3 order by id;
|
||||
id value value2
|
||||
2 2 2
|
||||
100 1 1
|
||||
2 2 2
|
||||
select value from t1 where value < 3;
|
||||
select value from t1 where value < 3 order by id;
|
||||
value
|
||||
1
|
||||
2
|
||||
select * from t1 where value2 < 3;
|
||||
1
|
||||
select * from t1 where value2 < 3 order by id;
|
||||
id value value2
|
||||
2 2 2
|
||||
100 1 1
|
||||
select * from t1;
|
||||
select * from t1 order by id;
|
||||
id value value2
|
||||
2 2 2
|
||||
3 3 3
|
||||
@@ -496,9 +496,8 @@ id value value2
|
||||
9 9 9
|
||||
10 10 10
|
||||
100 1 1
|
||||
select value from t1;
|
||||
select value from t1 order by id;
|
||||
value
|
||||
1
|
||||
2
|
||||
3
|
||||
4
|
||||
@@ -507,6 +506,7 @@ value
|
||||
8
|
||||
9
|
||||
10
|
||||
1
|
||||
rollback;
|
||||
begin;
|
||||
update t1 set value2=100 where value2=1;
|
||||
@@ -533,22 +533,22 @@ value
|
||||
select * from t1 where value2=5;
|
||||
id value value2
|
||||
5 5 5
|
||||
select * from t1 where id < 3;
|
||||
select * from t1 where id < 3 order by id;
|
||||
id value value2
|
||||
1 1 100
|
||||
2 2 2
|
||||
select * from t1 where value < 3;
|
||||
select * from t1 where value < 3 order by id;
|
||||
id value value2
|
||||
1 1 100
|
||||
2 2 2
|
||||
select value from t1 where value < 3;
|
||||
select value from t1 where value < 3 order by id;
|
||||
value
|
||||
1
|
||||
2
|
||||
select * from t1 where value2 < 3;
|
||||
select * from t1 where value2 < 3 order by id;
|
||||
id value value2
|
||||
2 2 2
|
||||
select * from t1;
|
||||
select * from t1 order by id;
|
||||
id value value2
|
||||
1 1 100
|
||||
2 2 2
|
||||
@@ -559,7 +559,7 @@ id value value2
|
||||
8 8 8
|
||||
9 9 9
|
||||
10 10 10
|
||||
select value from t1;
|
||||
select value from t1 order by id;
|
||||
value
|
||||
1
|
||||
2
|
||||
@@ -595,21 +595,21 @@ value
|
||||
select * from t1 where value2=5;
|
||||
id value value2
|
||||
5 5 5
|
||||
select * from t1 where id < 3;
|
||||
select * from t1 where id < 3 order by id;
|
||||
id value value2
|
||||
1 100 1
|
||||
2 2 2
|
||||
select * from t1 where value < 3;
|
||||
select * from t1 where value < 3 order by id;
|
||||
id value value2
|
||||
2 2 2
|
||||
select value from t1 where value < 3;
|
||||
select value from t1 where value < 3 order by id;
|
||||
value
|
||||
2
|
||||
select * from t1 where value2 < 3;
|
||||
select * from t1 where value2 < 3 order by id;
|
||||
id value value2
|
||||
1 100 1
|
||||
2 2 2
|
||||
select * from t1;
|
||||
select * from t1 order by id;
|
||||
id value value2
|
||||
1 100 1
|
||||
2 2 2
|
||||
@@ -620,8 +620,9 @@ id value value2
|
||||
8 8 8
|
||||
9 9 9
|
||||
10 10 10
|
||||
select value from t1;
|
||||
select value from t1 order by id;
|
||||
value
|
||||
100
|
||||
2
|
||||
3
|
||||
4
|
||||
@@ -630,7 +631,6 @@ value
|
||||
8
|
||||
9
|
||||
10
|
||||
100
|
||||
rollback;
|
||||
begin;
|
||||
update t1 set id=100 where value2=1;
|
||||
@@ -657,22 +657,22 @@ value
|
||||
select * from t1 where value2=5;
|
||||
id value value2
|
||||
5 5 5
|
||||
select * from t1 where id < 3;
|
||||
select * from t1 where id < 3 order by id;
|
||||
id value value2
|
||||
2 2 2
|
||||
select * from t1 where value < 3;
|
||||
select * from t1 where value < 3 order by id;
|
||||
id value value2
|
||||
2 2 2
|
||||
100 1 1
|
||||
2 2 2
|
||||
select value from t1 where value < 3;
|
||||
select value from t1 where value < 3 order by id;
|
||||
value
|
||||
1
|
||||
2
|
||||
select * from t1 where value2 < 3;
|
||||
1
|
||||
select * from t1 where value2 < 3 order by id;
|
||||
id value value2
|
||||
2 2 2
|
||||
100 1 1
|
||||
select * from t1;
|
||||
select * from t1 order by id;
|
||||
id value value2
|
||||
2 2 2
|
||||
3 3 3
|
||||
@@ -683,9 +683,8 @@ id value value2
|
||||
9 9 9
|
||||
10 10 10
|
||||
100 1 1
|
||||
select value from t1;
|
||||
select value from t1 order by id;
|
||||
value
|
||||
1
|
||||
2
|
||||
3
|
||||
4
|
||||
@@ -694,6 +693,7 @@ value
|
||||
8
|
||||
9
|
||||
10
|
||||
1
|
||||
rollback;
|
||||
begin;
|
||||
delete from t1 where id=1;
|
||||
@@ -717,19 +717,19 @@ value
|
||||
select * from t1 where value2=5;
|
||||
id value value2
|
||||
5 5 5
|
||||
select * from t1 where id < 3;
|
||||
select * from t1 where id < 3 order by id;
|
||||
id value value2
|
||||
2 2 2
|
||||
select * from t1 where value < 3;
|
||||
select * from t1 where value < 3 order by id;
|
||||
id value value2
|
||||
2 2 2
|
||||
select value from t1 where value < 3;
|
||||
select value from t1 where value < 3 order by id;
|
||||
value
|
||||
2
|
||||
select * from t1 where value2 < 3;
|
||||
select * from t1 where value2 < 3 order by id;
|
||||
id value value2
|
||||
2 2 2
|
||||
select * from t1;
|
||||
select * from t1 order by id;
|
||||
id value value2
|
||||
2 2 2
|
||||
3 3 3
|
||||
@@ -739,7 +739,7 @@ id value value2
|
||||
8 8 8
|
||||
9 9 9
|
||||
10 10 10
|
||||
select value from t1;
|
||||
select value from t1 order by id;
|
||||
value
|
||||
2
|
||||
3
|
||||
@@ -772,19 +772,19 @@ value
|
||||
select * from t1 where value2=5;
|
||||
id value value2
|
||||
5 5 5
|
||||
select * from t1 where id < 3;
|
||||
select * from t1 where id < 3 order by id;
|
||||
id value value2
|
||||
2 2 2
|
||||
select * from t1 where value < 3;
|
||||
select * from t1 where value < 3 order by id;
|
||||
id value value2
|
||||
2 2 2
|
||||
select value from t1 where value < 3;
|
||||
select value from t1 where value < 3 order by id;
|
||||
value
|
||||
2
|
||||
select * from t1 where value2 < 3;
|
||||
select * from t1 where value2 < 3 order by id;
|
||||
id value value2
|
||||
2 2 2
|
||||
select * from t1;
|
||||
select * from t1 order by id;
|
||||
id value value2
|
||||
2 2 2
|
||||
3 3 3
|
||||
@@ -794,7 +794,7 @@ id value value2
|
||||
8 8 8
|
||||
9 9 9
|
||||
10 10 10
|
||||
select value from t1;
|
||||
select value from t1 order by id;
|
||||
value
|
||||
2
|
||||
3
|
||||
@@ -827,19 +827,19 @@ value
|
||||
select * from t1 where value2=5;
|
||||
id value value2
|
||||
5 5 5
|
||||
select * from t1 where id < 3;
|
||||
select * from t1 where id < 3 order by id;
|
||||
id value value2
|
||||
2 2 2
|
||||
select * from t1 where value < 3;
|
||||
select * from t1 where value < 3 order by id;
|
||||
id value value2
|
||||
2 2 2
|
||||
select value from t1 where value < 3;
|
||||
select value from t1 where value < 3 order by id;
|
||||
value
|
||||
2
|
||||
select * from t1 where value2 < 3;
|
||||
select * from t1 where value2 < 3 order by id;
|
||||
id value value2
|
||||
2 2 2
|
||||
select * from t1;
|
||||
select * from t1 order by id;
|
||||
id value value2
|
||||
2 2 2
|
||||
3 3 3
|
||||
@@ -849,7 +849,7 @@ id value value2
|
||||
8 8 8
|
||||
9 9 9
|
||||
10 10 10
|
||||
select value from t1;
|
||||
select value from t1 order by id;
|
||||
value
|
||||
2
|
||||
3
|
||||
@@ -892,23 +892,23 @@ value
|
||||
select * from t1 where value2=5;
|
||||
id value value2
|
||||
100 5 5
|
||||
select * from t1 where id < 3;
|
||||
select * from t1 where id < 3 order by id;
|
||||
id value value2
|
||||
1 1 1
|
||||
2 2 2
|
||||
select * from t1 where value < 3;
|
||||
select * from t1 where value < 3 order by id;
|
||||
id value value2
|
||||
1 1 1
|
||||
2 2 2
|
||||
select value from t1 where value < 3;
|
||||
select value from t1 where value < 3 order by id;
|
||||
value
|
||||
1
|
||||
2
|
||||
select * from t1 where value2 < 3;
|
||||
select * from t1 where value2 < 3 order by id;
|
||||
id value value2
|
||||
1 1 1
|
||||
2 2 2
|
||||
select * from t1;
|
||||
select * from t1 order by id;
|
||||
id value value2
|
||||
1 1 1
|
||||
2 2 2
|
||||
@@ -920,17 +920,17 @@ id value value2
|
||||
13 13 13
|
||||
100 5 5
|
||||
115 3 3
|
||||
select value from t1;
|
||||
select value from t1 order by id;
|
||||
value
|
||||
1
|
||||
2
|
||||
3
|
||||
5
|
||||
103
|
||||
6
|
||||
10
|
||||
11
|
||||
12
|
||||
13
|
||||
103
|
||||
5
|
||||
3
|
||||
rollback;
|
||||
drop table t1;
|
||||
|
@@ -70,3 +70,15 @@ id id2 value
|
||||
1 1 1
|
||||
set debug_sync='RESET';
|
||||
drop table t1, t2;
|
||||
drop table if exists t1,t2,t3;
|
||||
create table t1 (id int, value int, primary key (id)) engine=rocksdb;
|
||||
create table t2 (id int, id2 int, value int, primary key (id), unique key (id2)) engine=rocksdb;
|
||||
create table t3 (id int, value int) engine=rocksdb;
|
||||
SET @old_val = @@session.unique_checks;
|
||||
set @@session.unique_checks = FALSE;
|
||||
insert into t1 values (1, 1), (1, 2);
|
||||
insert into t2 values (1, 1, 1), (1, 2, 1);
|
||||
ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
|
||||
insert into t3 values (1, 1), (1, 1);
|
||||
set @@session.unique_checks = @old_val;
|
||||
drop table t1, t2, t3;
|
||||
|
@@ -1,7 +1,7 @@
|
||||
SET GLOBAL rocksdb_write_disable_wal=false;
|
||||
SET GLOBAL rocksdb_write_ignore_missing_column_families=true;
|
||||
create table aaa (id int primary key, i int) engine rocksdb;
|
||||
SET LOCAL rocksdb_write_sync=off;
|
||||
SET LOCAL rocksdb_flush_log_at_trx_commit=0;
|
||||
select variable_value into @a from information_schema.global_status where variable_name='rocksdb_wal_synced';
|
||||
insert aaa(id, i) values(1,1);
|
||||
select variable_value-@a from information_schema.global_status where variable_name='rocksdb_wal_synced';
|
||||
@@ -15,7 +15,7 @@ insert aaa(id, i) values(3,1);
|
||||
select variable_value-@a from information_schema.global_status where variable_name='rocksdb_wal_synced';
|
||||
variable_value-@a
|
||||
0
|
||||
SET LOCAL rocksdb_write_sync=1;
|
||||
SET LOCAL rocksdb_flush_log_at_trx_commit=1;
|
||||
insert aaa(id, i) values(4,1);
|
||||
select variable_value-@a from information_schema.global_status where variable_name='rocksdb_wal_synced';
|
||||
variable_value-@a
|
||||
@@ -29,11 +29,11 @@ select variable_value-@a from information_schema.global_status where variable_na
|
||||
variable_value-@a
|
||||
3
|
||||
SET GLOBAL rocksdb_background_sync=on;
|
||||
SET LOCAL rocksdb_write_sync=off;
|
||||
SET LOCAL rocksdb_flush_log_at_trx_commit=0;
|
||||
insert aaa(id, i) values(7,1);
|
||||
truncate table aaa;
|
||||
drop table aaa;
|
||||
SET GLOBAL rocksdb_write_sync=off;
|
||||
SET GLOBAL rocksdb_flush_log_at_trx_commit=1;
|
||||
SET GLOBAL rocksdb_write_disable_wal=false;
|
||||
SET GLOBAL rocksdb_write_ignore_missing_column_families=false;
|
||||
SET GLOBAL rocksdb_background_sync=off;
|
||||
|
@@ -14,7 +14,7 @@ CREATE TABLE t1(id BIGINT AUTO_INCREMENT, value BIGINT, PRIMARY KEY(id)) ENGINE=
|
||||
|
||||
--echo # 2PC enabled, MyRocks durability enabled
|
||||
SET GLOBAL rocksdb_enable_2pc=0;
|
||||
SET GLOBAL rocksdb_write_sync=1;
|
||||
SET GLOBAL rocksdb_flush_log_at_trx_commit=1;
|
||||
|
||||
--echo ## 2PC + durability + single thread
|
||||
select variable_value into @c from information_schema.global_status where variable_name='rocksdb_wal_group_syncs';
|
||||
@@ -29,7 +29,7 @@ select case when variable_value-@c > 0 and variable_value-@c < 10000 then 'true'
|
||||
|
||||
--echo # 2PC enabled, MyRocks durability disabled
|
||||
SET GLOBAL rocksdb_enable_2pc=0;
|
||||
SET GLOBAL rocksdb_write_sync=0;
|
||||
SET GLOBAL rocksdb_flush_log_at_trx_commit=0;
|
||||
|
||||
select variable_value into @c from information_schema.global_status where variable_name='rocksdb_wal_group_syncs';
|
||||
--exec $MYSQL_SLAP --silent --concurrency=1 --number-of-queries=1000 --query="INSERT INTO t1 (id, value) VALUES(NULL, 1)"
|
||||
@@ -42,7 +42,7 @@ select case when variable_value-@c = 0 then 'true' else 'false' end from informa
|
||||
|
||||
--echo # 2PC disabled, MyRocks durability enabled
|
||||
SET GLOBAL rocksdb_enable_2pc=1;
|
||||
SET GLOBAL rocksdb_write_sync=1;
|
||||
SET GLOBAL rocksdb_flush_log_at_trx_commit=1;
|
||||
|
||||
select variable_value into @c from information_schema.global_status where variable_name='rocksdb_wal_group_syncs';
|
||||
--exec $MYSQL_SLAP --silent --concurrency=1 --number-of-queries=1000 --query="INSERT INTO t1 (id, value) VALUES(NULL, 1)"
|
||||
@@ -59,6 +59,6 @@ select case when variable_value-@c = 0 then 'true' else 'false' end from informa
|
||||
|
||||
|
||||
SET GLOBAL rocksdb_enable_2pc=1;
|
||||
SET GLOBAL rocksdb_write_sync=0;
|
||||
SET GLOBAL rocksdb_flush_log_at_trx_commit=1;
|
||||
DROP TABLE t1;
|
||||
DROP DATABASE mysqlslap;
|
||||
|
@@ -341,5 +341,3 @@ while ($i <= $max) {
|
||||
#SHOW TABLE STATUS WHERE name LIKE 't1';
|
||||
|
||||
DROP TABLE t1;
|
||||
|
||||
|
||||
|
@@ -0,0 +1 @@
|
||||
--rocksdb_table_stats_sampling_pct=100
|
@@ -0,0 +1,44 @@
|
||||
--source include/have_rocksdb.inc
|
||||
--source include/have_debug.inc
|
||||
--source include/have_debug_sync.inc
|
||||
|
||||
--disable_warnings
|
||||
DROP TABLE IF EXISTS t1;
|
||||
--enable_warnings
|
||||
|
||||
# Test that fast secondary index creation updates cardinality properly
|
||||
CREATE TABLE t1 (i INT PRIMARY KEY, j INT) ENGINE = ROCKSDB;
|
||||
INSERT INTO t1 VALUES (1,2), (2,4), (3,6), (4,8), (5,10);
|
||||
|
||||
SET debug_sync= 'rocksdb.commit_in_place_alter_table WAIT_FOR flushed';
|
||||
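# The ALTER below is issued asynchronously with 'send'; it pauses at the
# rocksdb.commit_in_place_alter_table sync point until some connection issues
# SIGNAL flushed, which con1 does after forcing the memtable flush.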
send ALTER TABLE t1 ADD INDEX kj(j), ALGORITHM=INPLACE;
|
||||
|
||||
connect (con1,localhost,root,,);
|
||||
|
||||
# Flush memtable out to SST
|
||||
SET GLOBAL rocksdb_force_flush_memtable_now = 1;
|
||||
SET debug_sync= 'now SIGNAL flushed';
|
||||
|
||||
connection default;
|
||||
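# 'reap' waits for and returns the result of the ALTER that was started with
# 'send' above, now that the sync point has been signalled.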
reap;
|
||||
|
||||
# Return the data for the primary key of t1
|
||||
--replace_column 1 # 2 # 3 SSTNAME 5 # 6 # 7 # 8 # 9 #
|
||||
SELECT * FROM INFORMATION_SCHEMA.ROCKSDB_INDEX_FILE_MAP
|
||||
WHERE INDEX_NUMBER =
|
||||
(SELECT INDEX_NUMBER FROM INFORMATION_SCHEMA.ROCKSDB_DDL
|
||||
WHERE TABLE_NAME = 't1' AND INDEX_NAME = "PRIMARY");
|
||||
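# The --replace_column above masks the columns whose values vary between runs and
# rewrites the SST file name column to SSTNAME, so the recorded output stays
# stable; the same masking is applied to the secondary-index query below.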
|
||||
# Return the data for the secondary index of t1
|
||||
--replace_column 1 # 2 # 3 SSTNAME 5 # 6 # 7 # 8 # 9 #
|
||||
SELECT * FROM INFORMATION_SCHEMA.ROCKSDB_INDEX_FILE_MAP
|
||||
WHERE INDEX_NUMBER =
|
||||
(SELECT INDEX_NUMBER FROM INFORMATION_SCHEMA.ROCKSDB_DDL
|
||||
WHERE TABLE_NAME = 't1' AND INDEX_NAME = "kj");
|
||||
|
||||
disconnect con1;
|
||||
SET debug_sync='RESET';
|
||||
|
||||
# cleanup
|
||||
DROP TABLE t1;
|
||||
|
@@ -0,0 +1,11 @@
|
||||
!include suite/rpl/my.cnf
|
||||
|
||||
[mysqld.1]
|
||||
sync_binlog=0
|
||||
binlog_format=row
|
||||
slave-exec-mode=strict
|
||||
|
||||
[mysqld.2]
|
||||
sync_binlog=0
|
||||
binlog_format=row
|
||||
slave-exec-mode=strict
|
@@ -0,0 +1,129 @@
|
||||
--source include/have_rocksdb.inc
|
||||
|
||||
source include/master-slave.inc;
|
||||
|
||||
connection master;
|
||||
|
||||
set @save_rocksdb_blind_delete_primary_key=@@session.rocksdb_blind_delete_primary_key;
|
||||
set @save_rocksdb_master_skip_tx_api=@@session.rocksdb_master_skip_tx_api;
|
||||
|
||||
--disable_warnings
|
||||
DROP TABLE IF EXISTS t1,t2;
|
||||
--enable_warnings
|
||||
create table t1 (id int primary key, value int, value2 varchar(200)) engine=rocksdb;
|
||||
create table t2 (id int primary key, value int, value2 varchar(200), index(value)) engine=rocksdb;
|
||||
|
||||
--disable_query_log
|
||||
let $t = 1;
|
||||
while ($t <= 2) {
|
||||
let $i = 1;
|
||||
while ($i <= 10000) {
|
||||
let $insert = INSERT INTO t$t VALUES($i, $i, REPEAT('x', 150));
|
||||
inc $i;
|
||||
eval $insert;
|
||||
}
|
||||
inc $t;
|
||||
}
|
||||
--enable_query_log
|
||||
|
||||
SET session rocksdb_blind_delete_primary_key=1;
|
||||
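# With rocksdb_blind_delete_primary_key=1, a single-row DELETE that specifies the
# full primary key can skip the read-before-delete step; the
# rocksdb_rows_deleted_blind counter sampled below counts how many deletes took
# that path. Later parts of this test check that tables with secondary keys and
# range deletes fall back to regular deletes.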
select variable_value into @c from information_schema.global_status where variable_name='rocksdb_rows_deleted_blind';
|
||||
# Deleting 1000 rows from t1
|
||||
--disable_query_log
|
||||
let $i = 1;
|
||||
while ($i <= 1000) {
|
||||
let $insert = DELETE FROM t1 WHERE id=$i;
|
||||
inc $i;
|
||||
eval $insert;
|
||||
}
|
||||
--enable_query_log
|
||||
select variable_value-@c from information_schema.global_status where variable_name='rocksdb_rows_deleted_blind';
|
||||
SELECT count(*) FROM t1;
|
||||
|
||||
--source include/sync_slave_sql_with_master.inc
|
||||
connection slave;
|
||||
SELECT count(*) FROM t1;
|
||||
connection master;
|
||||
|
||||
# Deleting 1000 rows from t2 (blind delete disabled because of secondary key)
|
||||
select variable_value into @c from information_schema.global_status where variable_name='rocksdb_rows_deleted_blind';
|
||||
--disable_query_log
|
||||
let $i = 1;
|
||||
while ($i <= 1000) {
|
||||
let $insert = DELETE FROM t2 WHERE id=$i;
|
||||
inc $i;
|
||||
eval $insert;
|
||||
}
|
||||
--enable_query_log
|
||||
select variable_value-@c from information_schema.global_status where variable_name='rocksdb_rows_deleted_blind';
|
||||
SELECT count(*) FROM t2;
|
||||
|
||||
SET session rocksdb_master_skip_tx_api=1;
|
||||
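# rocksdb_master_skip_tx_api is usually described as letting writes on the master
# bypass the RocksDB transaction API; here it is enabled together with blind
# deletes before repeating the per-row delete loop on both tables.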
|
||||
select variable_value into @c from information_schema.global_status where variable_name='rocksdb_rows_deleted_blind';
|
||||
--disable_query_log
|
||||
let $t = 1;
|
||||
while ($t <= 2) {
|
||||
let $i = 1001;
|
||||
while ($i <= 2000) {
|
||||
let $insert = DELETE FROM t$t WHERE id=$i;
|
||||
inc $i;
|
||||
eval $insert;
|
||||
}
|
||||
inc $t;
|
||||
}
|
||||
--enable_query_log
|
||||
select variable_value-@c from information_schema.global_status where variable_name='rocksdb_rows_deleted_blind';
|
||||
SELECT count(*) FROM t1;
|
||||
SELECT count(*) FROM t2;
|
||||
--source include/sync_slave_sql_with_master.inc
|
||||
connection slave;
|
||||
SELECT count(*) FROM t1;
|
||||
SELECT count(*) FROM t2;
|
||||
connection master;
|
||||
|
||||
|
||||
# Range Deletes (blind delete disabled)
|
||||
select variable_value into @c from information_schema.global_status where variable_name='rocksdb_rows_deleted_blind';
|
||||
DELETE FROM t1 WHERE id BETWEEN 3001 AND 4000;
|
||||
DELETE FROM t2 WHERE id BETWEEN 3001 AND 4000;
|
||||
select variable_value-@c from information_schema.global_status where variable_name='rocksdb_rows_deleted_blind';
|
||||
SELECT count(*) FROM t1;
|
||||
SELECT count(*) FROM t2;
|
||||
--source include/sync_slave_sql_with_master.inc
|
||||
connection slave;
|
||||
SELECT count(*) FROM t1;
|
||||
SELECT count(*) FROM t2;
|
||||
connection master;
|
||||
|
||||
|
||||
# Deleting the same key again (slave stops)
|
||||
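# id=10 was already removed by the earlier delete loop, and with blind deletes the
# master does not check that the row exists before logging the delete; replaying
# that event stops the slave SQL thread with a can't-find-record error, which the
# suppressions and wait_for_slave_sql_error below expect. Switching the slave to
# read-free replication for t.* afterwards lets it apply such events without the
# row lookup.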
DELETE FROM t1 WHERE id = 10;
|
||||
SELECT count(*) FROM t1;
|
||||
connection slave;
|
||||
call mtr.add_suppression("Slave SQL.*Could not execute Delete_rows event on table test.t1.*Error_code.*");
|
||||
call mtr.add_suppression("Slave: Can't find record in 't1'.*");
|
||||
# wait until we have the expected error
|
||||
--let $slave_sql_errno= convert_error(ER_KEY_NOT_FOUND)
|
||||
--source include/wait_for_slave_sql_error.inc
|
||||
|
||||
connection slave;
|
||||
set @save_rocksdb_read_free_rpl_tables=@@global.rocksdb_read_free_rpl_tables;
|
||||
set global rocksdb_read_free_rpl_tables="t.*";
|
||||
START SLAVE;
|
||||
connection master;
|
||||
--source include/sync_slave_sql_with_master.inc
|
||||
connection slave;
|
||||
SELECT count(*) FROM t1;
|
||||
connection master;
|
||||
|
||||
|
||||
# cleanup
|
||||
connection slave;
|
||||
set global rocksdb_read_free_rpl_tables=@save_rocksdb_read_free_rpl_tables;
|
||||
connection master;
|
||||
SET session rocksdb_blind_delete_primary_key=@save_rocksdb_blind_delete_primary_key;
|
||||
SET session rocksdb_master_skip_tx_api=@save_rocksdb_master_skip_tx_api;
|
||||
|
||||
DROP TABLE t1, t2;
|
||||
--source include/rpl_end.inc
|
@@ -1,7 +1,7 @@
|
||||
--source include/have_rocksdb.inc
|
||||
|
||||
--disable_warnings
|
||||
DROP TABLE IF EXISTS t1, t2;
|
||||
DROP TABLE IF EXISTS t1, t2, t3;
|
||||
--enable_warnings
|
||||
|
||||
# Create a table with a primary key and one secondary key as well as one
|
||||
@@ -25,7 +25,7 @@ CREATE TABLE t3(pk CHAR(5) PRIMARY KEY, a char(30), b char(30), key(a)) COLLATE
|
||||
perl;
|
||||
my $fn = $ENV{'ROCKSDB_INFILE'};
|
||||
open(my $fh, '>>', $fn) || die "perl open($fn): $!";
|
||||
my $max = 10000000;
|
||||
my $max = 5000000;
|
||||
my @chars = ("A".."Z", "a".."z", "0".."9");
|
||||
my @lowerchars = ("a".."z");
|
||||
my @powers_of_26 = (26 * 26 * 26 * 26, 26 * 26 * 26, 26 * 26, 26, 1);
|
||||
|
@@ -177,5 +177,8 @@ CREATE TABLE abc (id INT PRIMARY KEY, value varchar(50), index(value)) engine=ro
|
||||
CREATE TABLE abcd (id INT PRIMARY KEY, value varchar(50), index(value)) engine=rocksdb charset utf8;
|
||||
DROP TABLE abc;
|
||||
|
||||
# test bad regex (null caused a crash) - Issue 493
|
||||
SET GLOBAL rocksdb_strict_collation_exceptions=null;
|
||||
|
||||
# cleanup
|
||||
SET GLOBAL rocksdb_strict_collation_exceptions=@start_global_value;
|
||||
|
@@ -37,7 +37,7 @@ SELECT * FROM t1;
|
||||
SHOW SESSION STATUS LIKE 'Handler_read%';
|
||||
|
||||
FLUSH STATUS;
|
||||
SELECT * FROM t1 WHERE b <=5 ORDER BY b;
|
||||
SELECT * FROM t1 FORCE INDEX(b) WHERE b <=5 ORDER BY b;
|
||||
SHOW SESSION STATUS LIKE 'Handler_read%';
|
||||
|
||||
FLUSH STATUS;
|
||||
@@ -50,4 +50,3 @@ SHOW SESSION STATUS LIKE 'Handler_read%';
|
||||
|
||||
# Cleanup
|
||||
DROP TABLE t1;
|
||||
|
||||
|
@@ -35,6 +35,6 @@ insert into linktable (id1, link_type, id2) values (2, 1, 9);
|
||||
insert into linktable (id1, link_type, id2) values (2, 1, 10);
|
||||
|
||||
--replace_column 9 #
|
||||
explain select id1, id2, link_type, data from linktable where id1=2 and link_type=1 and (id2=1 or id2=2 or id2=3 or id2=4 or id2=5);
|
||||
explain select id1, id2, link_type, data from linktable force index(primary) where id1=2 and link_type=1 and (id2=1 or id2=2 or id2=3 or id2=4 or id2=5);
|
||||
|
||||
drop table linktable;
|
||||
|
29
storage/rocksdb/mysql-test/rocksdb/t/issue495.test
Normal file
@@ -0,0 +1,29 @@
|
||||
drop table if exists t;
|
||||
create table t (
|
||||
a int,
|
||||
b int,
|
||||
c varchar(12249) collate latin1_bin,
|
||||
d datetime,
|
||||
e int,
|
||||
f int,
|
||||
g blob,
|
||||
h int,
|
||||
i int,
|
||||
key (b,e),
|
||||
key (h,b)
|
||||
) engine=rocksdb
|
||||
partition by linear hash (i) partitions 8 ;
|
||||
|
||||
insert into t values (1,1,'a',now(),1,1,'a',1,1);
|
||||
insert into t values (1,1,'a',now(),1,1,'a',1,1);
|
||||
insert into t values (1,1,'a',now(),1,1,'a',1,1);
|
||||
insert into t values (1,1,'a',now(),1,1,'a',1,1);
|
||||
insert into t values (1,1,'a',now(),1,1,'a',1,1);
|
||||
insert into t values (1,1,'a',now(),1,1,'a',1,1);
|
||||
insert into t values (1,1,'a',now(),1,1,'a',1,1);
|
||||
insert into t values (1,1,'a',now(),1,1,'a',1,1);
|
||||
select i from t group by h;
|
||||
select i from t group by h;
|
||||
|
||||
drop table t;
|
||||
|
@@ -51,6 +51,9 @@ SET GLOBAL default_storage_engine=rocksdb;
|
||||
--exec $MYSQL_DUMP --skip-comments --single-transaction --master-data=2 --print-ordering-key test
|
||||
--exec grep "START TRANSACTION WITH CONSISTENT ROCKSDB SNAPSHOT" $MYSQLTEST_VARDIR/mysqld.1/mysqld.log | wc -l
|
||||
|
||||
# Sanity-test mysqldump when the --innodb-stats-on-metadata option is specified (it has no effect)
|
||||
--echo ==== mysqldump with --innodb-stats-on-metadata ====
|
||||
--exec $MYSQL_DUMP --skip-comments --single-transaction --master-data=2 --print-ordering-key --innodb-stats-on-metadata test
|
||||
|
||||
# wiping general log so that this test case doesn't fail with --repeat
|
||||
--exec echo "" > $MYSQLTEST_VARDIR/mysqld.1/mysqld.log
|
||||
|
@@ -8,6 +8,46 @@ DROP TABLE IF EXISTS t1;
|
||||
DROP TABLE IF EXISTS VAR_POP;
|
||||
DROP TABLE IF EXISTS TEMP0;
|
||||
DROP TABLE IF EXISTS VAR_SAMP;
|
||||
DROP TABLE IF EXISTS ti;
|
||||
DROP TABLE IF EXISTS members;
|
||||
DROP TABLE IF EXISTS members_2;
|
||||
DROP TABLE IF EXISTS employees;
|
||||
DROP TABLE IF EXISTS employees_2;
|
||||
DROP TABLE IF EXISTS employees_3;
|
||||
DROP TABLE IF EXISTS quarterly_report_status;
|
||||
DROP TABLE IF EXISTS employees_4;
|
||||
DROP TABLE IF EXISTS h2;
|
||||
DROP TABLE IF EXISTS rcx;
|
||||
DROP TABLE IF EXISTS r1;
|
||||
DROP TABLE IF EXISTS rc1;
|
||||
DROP TABLE IF EXISTS rx;
|
||||
DROP TABLE IF EXISTS rc2;
|
||||
DROP TABLE IF EXISTS rc3;
|
||||
DROP TABLE IF EXISTS rc4;
|
||||
DROP TABLE IF EXISTS employees_by_lname;
|
||||
DROP TABLE IF EXISTS customers_1;
|
||||
DROP TABLE IF EXISTS customers_2;
|
||||
DROP TABLE IF EXISTS customers_3;
|
||||
DROP TABLE IF EXISTS employees_hash;
|
||||
DROP TABLE IF EXISTS employees_hash_1;
|
||||
DROP TABLE IF EXISTS t1_hash;
|
||||
DROP TABLE IF EXISTS employees_linear_hash;
|
||||
DROP TABLE IF EXISTS t1_linear_hash;
|
||||
DROP TABLE IF EXISTS k1;
|
||||
DROP TABLE IF EXISTS k2;
|
||||
DROP TABLE IF EXISTS tm1;
|
||||
DROP TABLE IF EXISTS tk;
|
||||
DROP TABLE IF EXISTS ts;
|
||||
DROP TABLE IF EXISTS ts_1;
|
||||
DROP TABLE IF EXISTS ts_3;
|
||||
DROP TABLE IF EXISTS ts_4;
|
||||
DROP TABLE IF EXISTS ts_5;
|
||||
DROP TABLE IF EXISTS trb3;
|
||||
DROP TABLE IF EXISTS tr;
|
||||
DROP TABLE IF EXISTS members_3;
|
||||
DROP TABLE IF EXISTS clients;
|
||||
DROP TABLE IF EXISTS clients_lk;
|
||||
DROP TABLE IF EXISTS trb1;
|
||||
|
||||
--enable_warnings
|
||||
|
||||
@@ -22,6 +62,12 @@ while ($i <= $max) {
|
||||
eval $insert;
|
||||
}
|
||||
|
||||
ALTER TABLE t1 REBUILD PARTITION p0, p1;
|
||||
ALTER TABLE t1 OPTIMIZE PARTITION p0, p1;
|
||||
ALTER TABLE t1 ANALYZE PARTITION p3;
|
||||
ALTER TABLE t1 REPAIR PARTITION p0,p1;
|
||||
ALTER TABLE t1 CHECK PARTITION p1;
|
||||
|
||||
# The partition string is "#P#". To verify that parsing is done correctly, we'll
|
||||
# check that tables containing "P" somewhere in their names can be created correctly.
|
||||
CREATE TABLE VAR_POP (a int) ENGINE = ROCKSDB;
|
||||
@@ -35,8 +81,677 @@ SHOW TABLES;
|
||||
SELECT * FROM t1 ORDER BY i LIMIT 10;
|
||||
SELECT COUNT(*) FROM t1;
|
||||
|
||||
DROP TABLE t1;
|
||||
DROP TABLE VAR_POP;
|
||||
DROP TABLE TEMP0;
|
||||
DROP TABLE VAR_SAMP;
|
||||
#
|
||||
# Test-cases above are copied from
|
||||
# https://dev.mysql.com/doc/refman/5.6/en/partitioning.html to validate that the
|
||||
# partitioning related examples work with MyRocks.
|
||||
#
|
||||
|
||||
# Create a table that is partitioned by hash into 6 partitions.
|
||||
CREATE TABLE ti(
|
||||
id INT,
|
||||
amount DECIMAL(7,2),
|
||||
tr_date DATE
|
||||
) ENGINE=ROCKSDB
|
||||
PARTITION BY HASH(MONTH(tr_date))
|
||||
PARTITIONS 6;
|
||||
|
||||
CREATE TABLE members (
|
||||
firstname VARCHAR(25) NOT NULL,
|
||||
lastname VARCHAR(25) NOT NULL,
|
||||
username VARCHAR(16) NOT NULL,
|
||||
email VARCHAR(35),
|
||||
joined DATE NOT NULL
|
||||
) ENGINE=ROCKSDB
|
||||
PARTITION BY KEY(joined)
|
||||
PARTITIONS 6;
|
||||
|
||||
CREATE TABLE members_2 (
|
||||
firstname VARCHAR(25) NOT NULL,
|
||||
lastname VARCHAR(25) NOT NULL,
|
||||
username VARCHAR(16) NOT NULL,
|
||||
email VARCHAR(35),
|
||||
joined DATE NOT NULL
|
||||
) ENGINE=ROCKSDB
|
||||
PARTITION BY RANGE(YEAR(joined)) (
|
||||
PARTITION p0 VALUES LESS THAN (1960),
|
||||
PARTITION p1 VALUES LESS THAN (1970),
|
||||
PARTITION p2 VALUES LESS THAN (1980),
|
||||
PARTITION p3 VALUES LESS THAN (1990),
|
||||
PARTITION p4 VALUES LESS THAN MAXVALUE
|
||||
);
|
||||
|
||||
# Partition names are not case-sensitive.
|
||||
--error 1517
|
||||
CREATE TABLE t2 (val INT)
|
||||
ENGINE=ROCKSDB
|
||||
PARTITION BY LIST(val)(
|
||||
PARTITION mypart VALUES IN (1,3,5),
|
||||
PARTITION MyPart VALUES IN (2,4,6)
|
||||
);
|
||||
|
||||
CREATE TABLE employees (
|
||||
id INT NOT NULL,
|
||||
fname VARCHAR(30),
|
||||
lname VARCHAR(30),
|
||||
hired DATE NOT NULL DEFAULT '1970-01-01',
|
||||
separated DATE NOT NULL DEFAULT '9999-12-31',
|
||||
job_code INT NOT NULL,
|
||||
store_id INT NOT NULL
|
||||
) ENGINE=ROCKSDB
|
||||
PARTITION BY RANGE (store_id) (
|
||||
PARTITION p0 VALUES LESS THAN (6),
|
||||
PARTITION p1 VALUES LESS THAN (11),
|
||||
PARTITION p2 VALUES LESS THAN (16),
|
||||
PARTITION p3 VALUES LESS THAN MAXVALUE
|
||||
);
|
||||
|
||||
CREATE TABLE employees_2 (
|
||||
id INT NOT NULL,
|
||||
fname VARCHAR(30),
|
||||
lname VARCHAR(30),
|
||||
hired DATE NOT NULL DEFAULT '1970-01-01',
|
||||
separated DATE NOT NULL DEFAULT '9999-12-31',
|
||||
job_code INT NOT NULL,
|
||||
store_id INT NOT NULL
|
||||
) ENGINE=ROCKSDB
|
||||
PARTITION BY RANGE (job_code) (
|
||||
PARTITION p0 VALUES LESS THAN (100),
|
||||
PARTITION p1 VALUES LESS THAN (1000),
|
||||
PARTITION p2 VALUES LESS THAN (10000)
|
||||
);
|
||||
|
||||
CREATE TABLE employees_3 (
|
||||
id INT NOT NULL,
|
||||
fname VARCHAR(30),
|
||||
lname VARCHAR(30),
|
||||
hired DATE NOT NULL DEFAULT '1970-01-01',
|
||||
separated DATE NOT NULL DEFAULT '9999-12-31',
|
||||
job_code INT,
|
||||
store_id INT
|
||||
) ENGINE=ROCKSDB
|
||||
PARTITION BY RANGE (YEAR(separated)) (
|
||||
PARTITION p0 VALUES LESS THAN (1991),
|
||||
PARTITION p1 VALUES LESS THAN (1996),
|
||||
PARTITION p2 VALUES LESS THAN (2001),
|
||||
PARTITION p3 VALUES LESS THAN MAXVALUE
|
||||
);
|
||||
|
||||
CREATE TABLE quarterly_report_status (
|
||||
report_id INT NOT NULL,
|
||||
report_status VARCHAR(20) NOT NULL,
|
||||
report_updated TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP
|
||||
) ENGINE=ROCKSDB
|
||||
PARTITION BY RANGE (UNIX_TIMESTAMP(report_updated)) (
|
||||
PARTITION p0 VALUES LESS THAN ( UNIX_TIMESTAMP('2008-01-01 00:00:00') ),
|
||||
PARTITION p1 VALUES LESS THAN ( UNIX_TIMESTAMP('2008-04-01 00:00:00') ),
|
||||
PARTITION p2 VALUES LESS THAN ( UNIX_TIMESTAMP('2008-07-01 00:00:00') ),
|
||||
PARTITION p3 VALUES LESS THAN ( UNIX_TIMESTAMP('2008-10-01 00:00:00') ),
|
||||
PARTITION p4 VALUES LESS THAN ( UNIX_TIMESTAMP('2009-01-01 00:00:00') ),
|
||||
PARTITION p5 VALUES LESS THAN ( UNIX_TIMESTAMP('2009-04-01 00:00:00') ),
|
||||
PARTITION p6 VALUES LESS THAN ( UNIX_TIMESTAMP('2009-07-01 00:00:00') ),
|
||||
PARTITION p7 VALUES LESS THAN ( UNIX_TIMESTAMP('2009-10-01 00:00:00') ),
|
||||
PARTITION p8 VALUES LESS THAN ( UNIX_TIMESTAMP('2010-01-01 00:00:00') ),
|
||||
PARTITION p9 VALUES LESS THAN (MAXVALUE)
|
||||
);
|
||||
|
||||
CREATE TABLE employees_4 (
|
||||
id INT NOT NULL,
|
||||
fname VARCHAR(30),
|
||||
lname VARCHAR(30),
|
||||
hired DATE NOT NULL DEFAULT '1970-01-01',
|
||||
separated DATE NOT NULL DEFAULT '9999-12-31',
|
||||
job_code INT,
|
||||
store_id INT
|
||||
) ENGINE=ROCKSDB
|
||||
PARTITION BY LIST(store_id) (
|
||||
PARTITION pNorth VALUES IN (3,5,6,9,17),
|
||||
PARTITION pEast VALUES IN (1,2,10,11,19,20),
|
||||
PARTITION pWest VALUES IN (4,12,13,14,18),
|
||||
PARTITION pCentral VALUES IN (7,8,15,16)
|
||||
);
|
||||
|
||||
CREATE TABLE h2 (
|
||||
c1 INT,
|
||||
c2 INT
|
||||
) ENGINE=ROCKSDB
|
||||
PARTITION BY LIST(c1) (
|
||||
PARTITION p0 VALUES IN (1, 4, 7),
|
||||
PARTITION p1 VALUES IN (2, 5, 8)
|
||||
);
|
||||
|
||||
# ERROR 1526 (HY000): Table has no partition for value 3
|
||||
--error 1526
|
||||
INSERT INTO h2 VALUES (3, 5);
|
||||
|
||||
CREATE TABLE rcx (
|
||||
a INT,
|
||||
b INT,
|
||||
c CHAR(3),
|
||||
d INT
|
||||
) ENGINE=ROCKSDB
|
||||
PARTITION BY RANGE COLUMNS(a,d,c) (
|
||||
PARTITION p0 VALUES LESS THAN (5,10,'ggg'),
|
||||
PARTITION p1 VALUES LESS THAN (10,20,'mmm'),
|
||||
PARTITION p2 VALUES LESS THAN (15,30,'sss'),
|
||||
PARTITION p3 VALUES LESS THAN (MAXVALUE,MAXVALUE,MAXVALUE)
|
||||
);
|
||||
|
||||
CREATE TABLE r1 (
|
||||
a INT,
|
||||
b INT
|
||||
) ENGINE=ROCKSDB
|
||||
PARTITION BY RANGE (a) (
|
||||
PARTITION p0 VALUES LESS THAN (5),
|
||||
PARTITION p1 VALUES LESS THAN (MAXVALUE)
|
||||
);
|
||||
|
||||
INSERT INTO r1 VALUES (5,10), (5,11), (5,12);
|
||||
|
||||
CREATE TABLE rc1 (
|
||||
a INT,
|
||||
b INT
|
||||
) ENGINE=ROCKSDB
|
||||
PARTITION BY RANGE COLUMNS(a, b) (
|
||||
PARTITION p0 VALUES LESS THAN (5, 12),
|
||||
PARTITION p3 VALUES LESS THAN (MAXVALUE, MAXVALUE)
|
||||
);
|
||||
|
||||
INSERT INTO rc1 VALUES (5,10), (5,11), (5,12);
|
||||
SELECT (5,10) < (5,12), (5,11) < (5,12), (5,12) < (5,12);
|
||||
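# Not in the original test: a minimal sketch, assuming partition pruning
# applies to RANGE COLUMNS equality predicates, showing where the rows above
# land. (5,11) compares below the p0 bound (5,12), so it should sit in p0;
# (5,12) does not, so it falls through to p3.
EXPLAIN PARTITIONS SELECT * FROM rc1 WHERE a = 5 AND b = 11;
EXPLAIN PARTITIONS SELECT * FROM rc1 WHERE a = 5 AND b = 12;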
|
||||
CREATE TABLE rx (
|
||||
a INT,
|
||||
b INT
|
||||
) ENGINE=ROCKSDB
|
||||
PARTITION BY RANGE COLUMNS (a) (
|
||||
PARTITION p0 VALUES LESS THAN (5),
|
||||
PARTITION p1 VALUES LESS THAN (MAXVALUE)
|
||||
);
|
||||
|
||||
INSERT INTO rx VALUES (5,10), (5,11), (5,12);
|
||||
|
||||
CREATE TABLE rc2 (
|
||||
a INT,
|
||||
b INT
|
||||
) ENGINE=ROCKSDB
|
||||
PARTITION BY RANGE COLUMNS(a,b) (
|
||||
PARTITION p0 VALUES LESS THAN (0,10),
|
||||
PARTITION p1 VALUES LESS THAN (10,20),
|
||||
PARTITION p2 VALUES LESS THAN (10,30),
|
||||
PARTITION p3 VALUES LESS THAN (MAXVALUE,MAXVALUE)
|
||||
);
|
||||
|
||||
CREATE TABLE rc3 (
|
||||
a INT,
|
||||
b INT
|
||||
) ENGINE=ROCKSDB
|
||||
PARTITION BY RANGE COLUMNS(a,b) (
|
||||
PARTITION p0 VALUES LESS THAN (0,10),
|
||||
PARTITION p1 VALUES LESS THAN (10,20),
|
||||
PARTITION p2 VALUES LESS THAN (10,30),
|
||||
PARTITION p3 VALUES LESS THAN (10,35),
|
||||
PARTITION p4 VALUES LESS THAN (20,40),
|
||||
PARTITION p5 VALUES LESS THAN (MAXVALUE,MAXVALUE)
|
||||
);
|
||||
|
||||
CREATE TABLE rc4 (
|
||||
a INT,
|
||||
b INT,
|
||||
c INT
|
||||
) ENGINE=ROCKSDB
|
||||
PARTITION BY RANGE COLUMNS(a,b,c) (
|
||||
PARTITION p0 VALUES LESS THAN (0,25,50),
|
||||
PARTITION p1 VALUES LESS THAN (10,20,100),
|
||||
PARTITION p2 VALUES LESS THAN (10,30,50),
|
||||
PARTITION p3 VALUES LESS THAN (MAXVALUE,MAXVALUE,MAXVALUE)
|
||||
);
|
||||
|
||||
SELECT (0,25,50) < (10,20,100), (10,20,100) < (10,30,50);
|
||||
|
||||
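# The row-constructor comparisons above mirror the rule RANGE COLUMNS uses to
# decide whether successive VALUES LESS THAN tuples are strictly increasing,
# which is why the definition below is rejected.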
# ERROR 1493 (HY000): VALUES LESS THAN value must be strictly increasing for each partition
|
||||
|
||||
--error 1493
|
||||
CREATE TABLE rcf (
|
||||
a INT,
|
||||
b INT,
|
||||
c INT
|
||||
) ENGINE=ROCKSDB
|
||||
PARTITION BY RANGE COLUMNS(a,b,c) (
|
||||
PARTITION p0 VALUES LESS THAN (0,25,50),
|
||||
PARTITION p1 VALUES LESS THAN (20,20,100),
|
||||
PARTITION p2 VALUES LESS THAN (10,30,50),
|
||||
PARTITION p3 VALUES LESS THAN (MAXVALUE,MAXVALUE,MAXVALUE)
|
||||
);
|
||||
|
||||
CREATE TABLE employees_by_lname (
|
||||
id INT NOT NULL,
|
||||
fname VARCHAR(30),
|
||||
lname VARCHAR(30),
|
||||
hired DATE NOT NULL DEFAULT '1970-01-01',
|
||||
separated DATE NOT NULL DEFAULT '9999-12-31',
|
||||
job_code INT NOT NULL,
|
||||
store_id INT NOT NULL
|
||||
) ENGINE=ROCKSDB
|
||||
PARTITION BY RANGE COLUMNS (lname) (
|
||||
PARTITION p0 VALUES LESS THAN ('g'),
|
||||
PARTITION p1 VALUES LESS THAN ('m'),
|
||||
PARTITION p2 VALUES LESS THAN ('t'),
|
||||
PARTITION p3 VALUES LESS THAN (MAXVALUE)
|
||||
);
|
||||
|
||||
ALTER TABLE employees_by_lname PARTITION BY RANGE COLUMNS (lname) (
|
||||
PARTITION p0 VALUES LESS THAN ('g'),
|
||||
PARTITION p1 VALUES LESS THAN ('m'),
|
||||
PARTITION p2 VALUES LESS THAN ('t'),
|
||||
PARTITION p3 VALUES LESS THAN (MAXVALUE)
|
||||
);
|
||||
|
||||
ALTER TABLE employees_by_lname PARTITION BY RANGE COLUMNS (hired) (
|
||||
PARTITION p0 VALUES LESS THAN ('1970-01-01'),
|
||||
PARTITION p1 VALUES LESS THAN ('1980-01-01'),
|
||||
PARTITION p2 VALUES LESS THAN ('1990-01-01'),
|
||||
PARTITION p3 VALUES LESS THAN ('2000-01-01'),
|
||||
PARTITION p4 VALUES LESS THAN ('2010-01-01'),
|
||||
PARTITION p5 VALUES LESS THAN (MAXVALUE)
|
||||
);
|
||||
|
||||
CREATE TABLE customers_1 (
|
||||
first_name VARCHAR(25),
|
||||
last_name VARCHAR(25),
|
||||
street_1 VARCHAR(30),
|
||||
street_2 VARCHAR(30),
|
||||
city VARCHAR(15),
|
||||
renewal DATE
|
||||
) ENGINE=ROCKSDB
|
||||
PARTITION BY LIST COLUMNS(city) (
|
||||
PARTITION pRegion_1 VALUES IN('Oskarshamn', 'Högsby', 'Mönsterås'),
|
||||
PARTITION pRegion_2 VALUES IN('Vimmerby', 'Hultsfred', 'Västervik'),
|
||||
PARTITION pRegion_3 VALUES IN('Nässjö', 'Eksjö', 'Vetlanda'),
|
||||
PARTITION pRegion_4 VALUES IN('Uppvidinge', 'Alvesta', 'Växjo')
|
||||
);
|
||||
|
||||
CREATE TABLE customers_2 (
|
||||
first_name VARCHAR(25),
|
||||
last_name VARCHAR(25),
|
||||
street_1 VARCHAR(30),
|
||||
street_2 VARCHAR(30),
|
||||
city VARCHAR(15),
|
||||
renewal DATE
|
||||
) ENGINE=ROCKSDB
|
||||
PARTITION BY LIST COLUMNS(renewal) (
|
||||
PARTITION pWeek_1 VALUES IN('2010-02-01', '2010-02-02', '2010-02-03',
|
||||
'2010-02-04', '2010-02-05', '2010-02-06', '2010-02-07'),
|
||||
PARTITION pWeek_2 VALUES IN('2010-02-08', '2010-02-09', '2010-02-10',
|
||||
'2010-02-11', '2010-02-12', '2010-02-13', '2010-02-14'),
|
||||
PARTITION pWeek_3 VALUES IN('2010-02-15', '2010-02-16', '2010-02-17',
|
||||
'2010-02-18', '2010-02-19', '2010-02-20', '2010-02-21'),
|
||||
PARTITION pWeek_4 VALUES IN('2010-02-22', '2010-02-23', '2010-02-24',
|
||||
'2010-02-25', '2010-02-26', '2010-02-27', '2010-02-28')
|
||||
);
|
||||
|
||||
CREATE TABLE customers_3 (
|
||||
first_name VARCHAR(25),
|
||||
last_name VARCHAR(25),
|
||||
street_1 VARCHAR(30),
|
||||
street_2 VARCHAR(30),
|
||||
city VARCHAR(15),
|
||||
renewal DATE
|
||||
) ENGINE=ROCKSDB
|
||||
PARTITION BY RANGE COLUMNS(renewal) (
|
||||
PARTITION pWeek_1 VALUES LESS THAN('2010-02-09'),
|
||||
PARTITION pWeek_2 VALUES LESS THAN('2010-02-15'),
|
||||
PARTITION pWeek_3 VALUES LESS THAN('2010-02-22'),
|
||||
PARTITION pWeek_4 VALUES LESS THAN('2010-03-01')
|
||||
);
|
||||
|
||||
CREATE TABLE employees_hash (
|
||||
id INT NOT NULL,
|
||||
fname VARCHAR(30),
|
||||
lname VARCHAR(30),
|
||||
hired DATE NOT NULL DEFAULT '1970-01-01',
|
||||
separated DATE NOT NULL DEFAULT '9999-12-31',
|
||||
job_code INT,
|
||||
store_id INT
|
||||
) ENGINE=ROCKSDB
|
||||
PARTITION BY HASH(store_id)
|
||||
PARTITIONS 4;
|
||||
|
||||
CREATE TABLE employees_hash_1 (
|
||||
id INT NOT NULL,
|
||||
fname VARCHAR(30),
|
||||
lname VARCHAR(30),
|
||||
hired DATE NOT NULL DEFAULT '1970-01-01',
|
||||
separated DATE NOT NULL DEFAULT '9999-12-31',
|
||||
job_code INT,
|
||||
store_id INT
|
||||
) ENGINE=ROCKSDB
|
||||
PARTITION BY HASH( YEAR(hired) )
|
||||
PARTITIONS 4;
|
||||
|
||||
CREATE TABLE t1_hash (
|
||||
col1 INT,
|
||||
col2 CHAR(5),
|
||||
col3 DATE
|
||||
) ENGINE=ROCKSDB
|
||||
PARTITION BY HASH( YEAR(col3) )
|
||||
PARTITIONS 4;
|
||||
|
||||
CREATE TABLE employees_linear_hash (
|
||||
id INT NOT NULL,
|
||||
fname VARCHAR(30),
|
||||
lname VARCHAR(30),
|
||||
hired DATE NOT NULL DEFAULT '1970-01-01',
|
||||
separated DATE NOT NULL DEFAULT '9999-12-31',
|
||||
job_code INT,
|
||||
store_id INT
|
||||
) ENGINE=ROCKSDB
|
||||
PARTITION BY LINEAR HASH( YEAR(hired) )
|
||||
PARTITIONS 4;
|
||||
|
||||
CREATE TABLE t1_linear_hash (
|
||||
col1 INT,
|
||||
col2 CHAR(5),
|
||||
col3 DATE
|
||||
) ENGINE=ROCKSDB
|
||||
PARTITION BY LINEAR HASH( YEAR(col3) )
|
||||
PARTITIONS 6;
|
||||
|
||||
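# With an empty column list, KEY() falls back to the table's primary key (k1)
# or, absent a primary key, to a unique NOT NULL key (k2).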
CREATE TABLE k1 (
|
||||
id INT NOT NULL PRIMARY KEY,
|
||||
name VARCHAR(20)
|
||||
) ENGINE=ROCKSDB
|
||||
PARTITION BY KEY()
|
||||
PARTITIONS 2;
|
||||
|
||||
CREATE TABLE k2 (
|
||||
id INT NOT NULL,
|
||||
name VARCHAR(20),
|
||||
UNIQUE KEY (id)
|
||||
) ENGINE=ROCKSDB
|
||||
PARTITION BY KEY()
|
||||
PARTITIONS 2;
|
||||
|
||||
CREATE TABLE tm1 (
|
||||
s1 CHAR(32) PRIMARY KEY
|
||||
) ENGINE=ROCKSDB
|
||||
PARTITION BY KEY(s1)
|
||||
PARTITIONS 10;
|
||||
|
||||
CREATE TABLE tk (
|
||||
col1 INT NOT NULL,
|
||||
col2 CHAR(5),
|
||||
col3 DATE
|
||||
) ENGINE=ROCKSDB
|
||||
PARTITION BY LINEAR KEY (col1)
|
||||
PARTITIONS 3;
|
||||
|
||||
CREATE TABLE ts (
|
||||
id INT,
|
||||
purchased DATE
|
||||
) ENGINE=ROCKSDB
|
||||
PARTITION BY RANGE( YEAR(purchased) )
|
||||
SUBPARTITION BY HASH( TO_DAYS(purchased) )
|
||||
SUBPARTITIONS 2 (
|
||||
PARTITION p0 VALUES LESS THAN (1990),
|
||||
PARTITION p1 VALUES LESS THAN (2000),
|
||||
PARTITION p2 VALUES LESS THAN MAXVALUE
|
||||
);
|
||||
|
||||
CREATE TABLE ts_1 (
|
||||
id INT,
|
||||
purchased DATE
|
||||
) ENGINE=ROCKSDB
|
||||
PARTITION BY RANGE( YEAR(purchased) )
|
||||
SUBPARTITION BY HASH( TO_DAYS(purchased) ) (
|
||||
PARTITION p0 VALUES LESS THAN (1990) (
|
||||
SUBPARTITION s0,
|
||||
SUBPARTITION s1
|
||||
),
|
||||
PARTITION p1 VALUES LESS THAN (2000) (
|
||||
SUBPARTITION s2,
|
||||
SUBPARTITION s3
|
||||
),
|
||||
PARTITION p2 VALUES LESS THAN MAXVALUE (
|
||||
SUBPARTITION s4,
|
||||
SUBPARTITION s5
|
||||
)
|
||||
);
|
||||
|
||||
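# ERROR 1064: once subpartitions are listed explicitly, every partition must
# define the same number of them; p1 below omits its subpartition list.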
--error 1064
|
||||
CREATE TABLE ts_2 (
|
||||
id INT,
|
||||
purchased DATE
|
||||
) ENGINE=ROCKSDB
|
||||
PARTITION BY RANGE( YEAR(purchased) )
|
||||
SUBPARTITION BY HASH( TO_DAYS(purchased) ) (
|
||||
PARTITION p0 VALUES LESS THAN (1990) (
|
||||
SUBPARTITION s0,
|
||||
SUBPARTITION s1
|
||||
),
|
||||
PARTITION p1 VALUES LESS THAN (2000),
|
||||
PARTITION p2 VALUES LESS THAN MAXVALUE (
|
||||
SUBPARTITION s2,
|
||||
SUBPARTITION s3
|
||||
)
|
||||
);
|
||||
|
||||
CREATE TABLE ts_3 (
|
||||
id INT,
|
||||
purchased DATE
|
||||
) ENGINE=ROCKSDB
|
||||
PARTITION BY RANGE( YEAR(purchased) )
|
||||
SUBPARTITION BY HASH( TO_DAYS(purchased) ) (
|
||||
PARTITION p0 VALUES LESS THAN (1990) (
|
||||
SUBPARTITION s0,
|
||||
SUBPARTITION s1
|
||||
),
|
||||
PARTITION p1 VALUES LESS THAN (2000) (
|
||||
SUBPARTITION s2,
|
||||
SUBPARTITION s3
|
||||
),
|
||||
PARTITION p2 VALUES LESS THAN MAXVALUE (
|
||||
SUBPARTITION s4,
|
||||
SUBPARTITION s5
|
||||
)
|
||||
);
|
||||
|
||||
CREATE TABLE ts_4 (
|
||||
id INT,
|
||||
purchased DATE
|
||||
) ENGINE=ROCKSDB
|
||||
PARTITION BY RANGE( YEAR(purchased) )
|
||||
SUBPARTITION BY HASH( TO_DAYS(purchased) ) (
|
||||
PARTITION p0 VALUES LESS THAN (1990) (
|
||||
SUBPARTITION s0,
|
||||
SUBPARTITION s1
|
||||
),
|
||||
PARTITION p1 VALUES LESS THAN (2000) (
|
||||
SUBPARTITION s2,
|
||||
SUBPARTITION s3
|
||||
),
|
||||
PARTITION p2 VALUES LESS THAN MAXVALUE (
|
||||
SUBPARTITION s4,
|
||||
SUBPARTITION s5
|
||||
)
|
||||
);
|
||||
|
||||
CREATE TABLE ts_5 (
|
||||
id INT,
|
||||
purchased DATE
|
||||
) ENGINE=ROCKSDB
|
||||
PARTITION BY RANGE(YEAR(purchased))
|
||||
SUBPARTITION BY HASH( TO_DAYS(purchased) ) (
|
||||
PARTITION p0 VALUES LESS THAN (1990) (
|
||||
SUBPARTITION s0a,
|
||||
SUBPARTITION s0b
|
||||
),
|
||||
PARTITION p1 VALUES LESS THAN (2000) (
|
||||
SUBPARTITION s1a,
|
||||
SUBPARTITION s1b
|
||||
),
|
||||
PARTITION p2 VALUES LESS THAN MAXVALUE (
|
||||
SUBPARTITION s2a,
|
||||
SUBPARTITION s2b
|
||||
)
|
||||
);
|
||||
|
||||
CREATE TABLE trb3 (
|
||||
id INT,
|
||||
name VARCHAR(50),
|
||||
purchased DATE
|
||||
) ENGINE=ROCKSDB
|
||||
PARTITION BY RANGE( YEAR(purchased) ) (
|
||||
PARTITION p0 VALUES LESS THAN (1990),
|
||||
PARTITION p1 VALUES LESS THAN (1995),
|
||||
PARTITION p2 VALUES LESS THAN (2000),
|
||||
PARTITION p3 VALUES LESS THAN (2005)
|
||||
);
|
||||
|
||||
ALTER TABLE trb3 PARTITION BY KEY(id) PARTITIONS 2;
|
||||
|
||||
CREATE TABLE tr (
|
||||
id INT,
|
||||
name VARCHAR(50),
|
||||
purchased DATE
|
||||
) ENGINE=ROCKSDB
|
||||
PARTITION BY RANGE( YEAR(purchased) ) (
|
||||
PARTITION p0 VALUES LESS THAN (1990),
|
||||
PARTITION p1 VALUES LESS THAN (1995),
|
||||
PARTITION p2 VALUES LESS THAN (2000),
|
||||
PARTITION p3 VALUES LESS THAN (2005)
|
||||
);
|
||||
|
||||
INSERT INTO tr VALUES
|
||||
(1, 'desk organiser', '2003-10-15'),
|
||||
(2, 'CD player', '1993-11-05'),
|
||||
(3, 'TV set', '1996-03-10'),
|
||||
(4, 'bookcase', '1982-01-10'),
|
||||
(5, 'exercise bike', '2004-05-09'),
|
||||
(6, 'sofa', '1987-06-05'),
|
||||
(7, 'popcorn maker', '2001-11-22'),
|
||||
(8, 'aquarium', '1992-08-04'),
|
||||
(9, 'study desk', '1984-09-16'),
|
||||
(10, 'lava lamp', '1998-12-25');
|
||||
|
||||
SELECT * FROM tr WHERE purchased BETWEEN '1995-01-01' AND '1999-12-31';
|
||||
|
||||
ALTER TABLE tr DROP PARTITION p2;
|
||||
|
||||
SELECT * FROM tr WHERE purchased BETWEEN '1995-01-01' AND '1999-12-31';
|
||||
|
||||
CREATE TABLE members_3 (
|
||||
id INT,
|
||||
fname VARCHAR(25),
|
||||
lname VARCHAR(25),
|
||||
dob DATE
|
||||
) ENGINE=ROCKSDB
|
||||
PARTITION BY RANGE( YEAR(dob) ) (
|
||||
PARTITION p0 VALUES LESS THAN (1970),
|
||||
PARTITION p1 VALUES LESS THAN (1980),
|
||||
PARTITION p2 VALUES LESS THAN (1990)
|
||||
);
|
||||
|
||||
ALTER TABLE members_3 ADD PARTITION (PARTITION p3 VALUES LESS THAN (2000));
|
||||
|
||||
# ERROR 1493 (HY000): VALUES LESS THAN value must be strictly increasing for each partition
|
||||
--error 1493
|
||||
ALTER TABLE members_3 ADD PARTITION (PARTITION n VALUES LESS THAN (1960));
|
||||
|
||||
CREATE TABLE clients (
|
||||
id INT,
|
||||
fname VARCHAR(30),
|
||||
lname VARCHAR(30),
|
||||
signed DATE
|
||||
) ENGINE=ROCKSDB
|
||||
PARTITION BY HASH( MONTH(signed) )
|
||||
PARTITIONS 12;
|
||||
|
||||
ALTER TABLE clients COALESCE PARTITION 4;
|
||||
|
||||
CREATE TABLE clients_lk (
|
||||
id INT,
|
||||
fname VARCHAR(30),
|
||||
lname VARCHAR(30),
|
||||
signed DATE
|
||||
) ENGINE=ROCKSDB
|
||||
PARTITION BY LINEAR KEY(signed)
|
||||
PARTITIONS 12;
|
||||
|
||||
# ERROR 1508 (HY000): Cannot remove all partitions, use DROP TABLE instead
|
||||
--error 1508
|
||||
ALTER TABLE clients COALESCE PARTITION 18;
|
||||
|
||||
ALTER TABLE clients ADD PARTITION PARTITIONS 6;
|
||||
|
||||
CREATE TABLE trb1 (
|
||||
id INT,
|
||||
name VARCHAR(50),
|
||||
purchased DATE
|
||||
) ENGINE=ROCKSDB
|
||||
PARTITION BY RANGE(id) (
|
||||
PARTITION p0 VALUES LESS THAN (3),
|
||||
PARTITION p1 VALUES LESS THAN (7),
|
||||
PARTITION p2 VALUES LESS THAN (9),
|
||||
PARTITION p3 VALUES LESS THAN (11)
|
||||
);
|
||||
|
||||
INSERT INTO trb1 VALUES
|
||||
(1, 'desk organiser', '2003-10-15'),
|
||||
(2, 'CD player', '1993-11-05'),
|
||||
(3, 'TV set', '1996-03-10'),
|
||||
(4, 'bookcase', '1982-01-10'),
|
||||
(5, 'exercise bike', '2004-05-09'),
|
||||
(6, 'sofa', '1987-06-05'),
|
||||
(7, 'popcorn maker', '2001-11-22'),
|
||||
(8, 'aquarium', '1992-08-04'),
|
||||
(9, 'study desk', '1984-09-16'),
|
||||
(10, 'lava lamp', '1998-12-25');
|
||||
|
||||
ALTER TABLE trb1 ADD PRIMARY KEY (id);
|
||||
|
||||
# Clean up.
|
||||
DROP TABLE IF EXISTS t1;
|
||||
DROP TABLE IF EXISTS VAR_POP;
|
||||
DROP TABLE IF EXISTS TEMP0;
|
||||
DROP TABLE IF EXISTS VAR_SAMP;
|
||||
DROP TABLE IF EXISTS ti;
|
||||
DROP TABLE IF EXISTS members;
|
||||
DROP TABLE IF EXISTS members_2;
|
||||
DROP TABLE IF EXISTS employees;
|
||||
DROP TABLE IF EXISTS employees_2;
|
||||
DROP TABLE IF EXISTS employees_3;
|
||||
DROP TABLE IF EXISTS quarterly_report_status;
|
||||
DROP TABLE IF EXISTS employees_4;
|
||||
DROP TABLE IF EXISTS h2;
|
||||
DROP TABLE IF EXISTS rcx;
|
||||
DROP TABLE IF EXISTS r1;
|
||||
DROP TABLE IF EXISTS rc1;
|
||||
DROP TABLE IF EXISTS rx;
|
||||
DROP TABLE IF EXISTS rc2;
|
||||
DROP TABLE IF EXISTS rc3;
|
||||
DROP TABLE IF EXISTS rc4;
|
||||
DROP TABLE IF EXISTS employees_by_lname;
|
||||
DROP TABLE IF EXISTS customers_1;
|
||||
DROP TABLE IF EXISTS customers_2;
|
||||
DROP TABLE IF EXISTS customers_3;
|
||||
DROP TABLE IF EXISTS employees_hash;
|
||||
DROP TABLE IF EXISTS employees_hash_1;
|
||||
DROP TABLE IF EXISTS t1_hash;
|
||||
DROP TABLE IF EXISTS employees_linear_hash;
|
||||
DROP TABLE IF EXISTS t1_linear_hash;
|
||||
DROP TABLE IF EXISTS k1;
|
||||
DROP TABLE IF EXISTS k2;
|
||||
DROP TABLE IF EXISTS tm1;
|
||||
DROP TABLE IF EXISTS tk;
|
||||
DROP TABLE IF EXISTS ts;
|
||||
DROP TABLE IF EXISTS ts_1;
|
||||
DROP TABLE IF EXISTS ts_3;
|
||||
DROP TABLE IF EXISTS ts_4;
|
||||
DROP TABLE IF EXISTS ts_5;
|
||||
DROP TABLE IF EXISTS trb3;
|
||||
DROP TABLE IF EXISTS tr;
|
||||
DROP TABLE IF EXISTS members_3;
|
||||
DROP TABLE IF EXISTS clients;
|
||||
DROP TABLE IF EXISTS clients_lk;
|
||||
DROP TABLE IF EXISTS trb1;
|
||||
|
@@ -11,7 +11,7 @@ DROP TABLE IF EXISTS t1;
|
||||
|
||||
# restart server with correct parameters
|
||||
shutdown_server 10;
|
||||
--exec echo "restart:--rocksdb_persistent_cache_path=$_cache_file_name --rocksdb_persistent_cache_size=1000000000" >$_expect_file_name
|
||||
--exec echo "restart:--rocksdb_persistent_cache_path=$_cache_file_name --rocksdb_persistent_cache_size_mb=100" >$_expect_file_name
|
||||
--sleep 5
|
||||
--enable_reconnect
|
||||
--source include/wait_until_connected_again.inc
|
||||
@@ -29,7 +29,7 @@ select * from t1 where a = 1;
|
||||
# restart server to re-read cache
|
||||
--exec echo "wait" >$_expect_file_name
|
||||
shutdown_server 10;
|
||||
--exec echo "restart:--rocksdb_persistent_cache_path=$_cache_file_name --rocksdb_persistent_cache_size=1000000000" >$_expect_file_name
|
||||
--exec echo "restart:--rocksdb_persistent_cache_path=$_cache_file_name --rocksdb_persistent_cache_size_mb=100" >$_expect_file_name
|
||||
--sleep 5
|
||||
--enable_reconnect
|
||||
--source include/wait_until_connected_again.inc
|
||||
|
@@ -792,6 +792,20 @@ insert into t47 values (2, 'row2');
|
||||
set rocksdb_bulk_load=1;
|
||||
insert into t47 values (3, 'row3'),(4, 'row4');
|
||||
set rocksdb_bulk_load=0;
|
||||
# Check concurrent bulk loading
|
||||
--connect (con1,localhost,root,,)
|
||||
set rocksdb_bulk_load=1;
|
||||
insert into t47 values (10, 'row10'),(11, 'row11');
|
||||
--connection default
|
||||
set rocksdb_bulk_load=1;
|
||||
insert into t47 values (100, 'row100'),(101, 'row101');
|
||||
--disconnect con1
|
||||
--connection default
|
||||
set rocksdb_bulk_load=0;
|
||||
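# Presumably the rows buffered by con1's bulk load are only written out once
# that connection goes away, so wait until all 8 rows are visible.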
--disable_query_log
|
||||
let $wait_condition = select count(*) = 8 as c from t47;
|
||||
--source include/wait_condition.inc
|
||||
--enable_query_log
|
||||
select * from t47;
|
||||
drop table t47;
|
||||
|
||||
|
@@ -0,0 +1,494 @@
|
||||
--source include/have_rocksdb.inc
|
||||
|
||||
--disable_warnings
|
||||
DROP TABLE IF EXISTS t1;
|
||||
DROP TABLE IF EXISTS t2;
|
||||
--enable_warnings
|
||||
|
||||
#
|
||||
# Create a table with multiple partitions, but in the comment don't specify
|
||||
# that per-partition based column families (CF) should be created. Expect that
|
||||
# the default CF will be used and no new one will be created.
|
||||
#
|
||||
CREATE TABLE t1 (
|
||||
c1 INT,
|
||||
c2 INT,
|
||||
name VARCHAR(25) NOT NULL,
|
||||
event DATE,
|
||||
PRIMARY KEY (`c1`, `c2`) COMMENT 'testcomment'
|
||||
) ENGINE=ROCKSDB
|
||||
PARTITION BY LIST(c1) (
|
||||
PARTITION custom_p0 VALUES IN (1, 4, 7),
|
||||
PARTITION custom_p1 VALUES IN (2, 5, 8),
|
||||
PARTITION custom_p2 VALUES IN (3, 6, 9)
|
||||
);
|
||||
|
||||
# Expecting no results here.
|
||||
SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='testcomment';
|
||||
|
||||
DROP TABLE t1;
|
||||
|
||||
#
|
||||
# Same test case as above, only with the reverse CF. Should result in the same
|
||||
# behavior. No new CF-s created, only default one will be used.
|
||||
#
|
||||
CREATE TABLE t1 (
|
||||
c1 INT,
|
||||
c2 INT,
|
||||
name VARCHAR(25) NOT NULL,
|
||||
event DATE,
|
||||
PRIMARY KEY (`c1`, `c2`) COMMENT 'rev:testrevcomment'
|
||||
) ENGINE=ROCKSDB
|
||||
PARTITION BY LIST(c1) (
|
||||
PARTITION custom_p0 VALUES IN (1, 4, 7),
|
||||
PARTITION custom_p1 VALUES IN (2, 5, 8),
|
||||
PARTITION custom_p2 VALUES IN (3, 6, 9)
|
||||
);
|
||||
|
||||
# Expecting no results here.
|
||||
SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='rev:testrevcomment';
|
||||
|
||||
DROP TABLE t1;
|
||||
|
||||
#
|
||||
# Create a table with multiple partitions and request for separate CF to be
|
||||
# created per every partition. As a result we expect three different CF-s to be
|
||||
# created.
|
||||
#
|
||||
CREATE TABLE t1 (
|
||||
c1 INT,
|
||||
c2 INT,
|
||||
name VARCHAR(25) NOT NULL,
|
||||
event DATE,
|
||||
PRIMARY KEY (`c1`, `c2`) COMMENT 'custom_p0_cfname=foo;custom_p1_cfname=my_custom_cf;custom_p2_cfname=baz'
|
||||
) ENGINE=ROCKSDB
|
||||
PARTITION BY LIST(c1) (
|
||||
PARTITION custom_p0 VALUES IN (1, 4, 7),
|
||||
PARTITION custom_p1 VALUES IN (2, 5, 8),
|
||||
PARTITION custom_p2 VALUES IN (3, 6, 9)
|
||||
);
|
||||
|
||||
set @@global.rocksdb_compact_cf = 'foo';
|
||||
set @@global.rocksdb_compact_cf = 'my_custom_cf';
|
||||
set @@global.rocksdb_compact_cf = 'baz';
|
||||
|
||||
SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='foo';
|
||||
SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='my_custom_cf';
|
||||
SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='baz';
|
||||
|
||||
DROP TABLE t1;
|
||||
|
||||
#
|
||||
# Same test case as above, only one of the partitions has "rev:" prefix. The
|
||||
# intent here is to make sure that qualifier can specify reverse CF as well.
|
||||
#
|
||||
CREATE TABLE t1 (
|
||||
c1 INT,
|
||||
c2 INT,
|
||||
name VARCHAR(25) NOT NULL,
|
||||
event DATE,
|
||||
PRIMARY KEY (`c1`, `c2`) COMMENT 'custom_p0_cfname=t1-p0;custom_p1_cfname=rev:bar;custom_p2_cfname=t1-p2'
|
||||
) ENGINE=ROCKSDB
|
||||
PARTITION BY LIST(c1) (
|
||||
PARTITION custom_p0 VALUES IN (1, 4, 7),
|
||||
PARTITION custom_p1 VALUES IN (2, 5, 8),
|
||||
PARTITION custom_p2 VALUES IN (3, 6, 9)
|
||||
);
|
||||
|
||||
set @@global.rocksdb_compact_cf = 't1-p0';
|
||||
set @@global.rocksdb_compact_cf = 'rev:bar';
|
||||
set @@global.rocksdb_compact_cf = 't1-p2';
|
||||
|
||||
SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='t1-p0';
|
||||
SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='rev:bar';
|
||||
SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='t1-p2';
|
||||
|
||||
DROP TABLE t1;
|
||||
|
||||
|
||||
#
|
||||
# Create a table with multiple partitions and assign two partitions to the same
|
||||
# CF, the third one gets a separate CF, and the fourth one will belong to the
|
||||
# default one. As a result we expect two new CF-s to be created.
|
||||
#
|
||||
CREATE TABLE t1 (
|
||||
c1 INT,
|
||||
c2 INT,
|
||||
name VARCHAR(25) NOT NULL,
|
||||
event DATE,
|
||||
PRIMARY KEY (`c1`, `c2`) COMMENT 'custom_p0_cfname=cf-zero;custom_p1_cfname=cf-one;custom_p2_cfname=cf-zero'
|
||||
) ENGINE=ROCKSDB
|
||||
PARTITION BY LIST(c1) (
|
||||
PARTITION custom_p0 VALUES IN (1, 4, 7),
|
||||
PARTITION custom_p1 VALUES IN (2, 5, 8),
|
||||
PARTITION custom_p2 VALUES IN (3, 6, 9),
|
||||
PARTITION custom_p3 VALUES IN (10, 20, 30)
|
||||
);
|
||||
|
||||
set @@global.rocksdb_compact_cf = 'cf-zero';
|
||||
set @@global.rocksdb_compact_cf = 'cf-one';
|
||||
|
||||
SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='cf-zero';
|
||||
SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='cf-one';
|
||||
|
||||
DROP TABLE t1;
|
||||
|
||||
#
|
||||
# Create a table with CF-s per partition and verify that ALTER TABLE + DROP
|
||||
# INDEX work for that scenario and data is persisted.
|
||||
#
|
||||
CREATE TABLE t1 (
|
||||
c1 INT,
|
||||
c2 INT,
|
||||
name VARCHAR(25) NOT NULL,
|
||||
event DATE,
|
||||
PRIMARY KEY (`c1`, `c2`) COMMENT 'custom_p0_cfname=foo;custom_p1_cfname=bar;custom_p2_cfname=baz'
|
||||
) ENGINE=ROCKSDB
|
||||
PARTITION BY LIST(c1) (
|
||||
PARTITION custom_p0 VALUES IN (1, 4, 7),
|
||||
PARTITION custom_p1 VALUES IN (2, 5, 8),
|
||||
PARTITION custom_p2 VALUES IN (3, 6, 9)
|
||||
);
|
||||
|
||||
INSERT INTO t1 VALUES (1, 1, "one", null);
|
||||
INSERT INTO t1 VALUES (2, 2, "two", null);
|
||||
INSERT INTO t1 VALUES (3, 3, "three", null);
|
||||
INSERT INTO t1 VALUES (5, 5, "five", null);
|
||||
INSERT INTO t1 VALUES (9, 9, "nine", null);
|
||||
|
||||
SELECT * FROM t1;
|
||||
ALTER TABLE t1 DROP PRIMARY KEY;
|
||||
SELECT * FROM t1;
|
||||
|
||||
#
|
||||
# Verify that we can compact custom CF-s.
|
||||
#
|
||||
set @@global.rocksdb_compact_cf = 'foo';
|
||||
set @@global.rocksdb_compact_cf = 'bar';
|
||||
set @@global.rocksdb_compact_cf = 'baz';
|
||||
|
||||
DROP TABLE t1;
|
||||
|
||||
#
|
||||
# Create a table with CF-s per partition and verify that ALTER TABLE + DROP
|
||||
# INDEX + ADD INDEX work for that scenario, data is persisted, and new CF-s
|
||||
# are created.
|
||||
#
|
||||
CREATE TABLE t1 (
|
||||
c1 INT,
|
||||
c2 INT,
|
||||
name VARCHAR(25) NOT NULL,
|
||||
event DATE,
|
||||
PRIMARY KEY (`c1`, `c2`) COMMENT 'custom_p0_cfname=foo;custom_p1_cfname=bar;custom_p2_cfname=baz'
|
||||
) ENGINE=ROCKSDB
|
||||
PARTITION BY LIST(c1) (
|
||||
PARTITION custom_p0 VALUES IN (1, 4, 7),
|
||||
PARTITION custom_p1 VALUES IN (2, 5, 8),
|
||||
PARTITION custom_p2 VALUES IN (3, 6, 9)
|
||||
);
|
||||
|
||||
INSERT INTO t1 VALUES (1, 1, "one", null);
|
||||
INSERT INTO t1 VALUES (2, 2, "two", null);
|
||||
INSERT INTO t1 VALUES (3, 3, "three", null);
|
||||
INSERT INTO t1 VALUES (5, 5, "five", null);
|
||||
INSERT INTO t1 VALUES (9, 9, "nine", null);
|
||||
|
||||
ALTER TABLE t1 DROP PRIMARY KEY;
|
||||
ALTER TABLE t1 ADD PRIMARY KEY (`c1`, `c2`) COMMENT 'custom_p0_cfname=p0_cf;custom_p1_cfname=p1_cf';
|
||||
|
||||
set @@global.rocksdb_compact_cf = 'p0_cf';
|
||||
set @@global.rocksdb_compact_cf = 'p1_cf';
|
||||
|
||||
SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='p0_cf';
|
||||
SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='p1_cf';
|
||||
|
||||
DROP TABLE t1;
|
||||
|
||||
#
|
||||
# Create a table with CF-s per partition, use ALTER TABLE to change the way it's
|
||||
# partitioned and verify that new CF-s will be created.
|
||||
#
|
||||
CREATE TABLE t1 (
|
||||
c1 INT,
|
||||
c2 INT,
|
||||
name VARCHAR(25) NOT NULL,
|
||||
event DATE,
|
||||
PRIMARY KEY (`c1`, `c2`) COMMENT 'custom_p0_cfname=foo;custom_p1_cfname=bar;custom_p2_cfname=baz'
|
||||
) ENGINE=ROCKSDB
|
||||
PARTITION BY LIST(c1) (
|
||||
PARTITION custom_p0 VALUES IN (1, 4, 7),
|
||||
PARTITION custom_p1 VALUES IN (2, 5, 8),
|
||||
PARTITION custom_p2 VALUES IN (3, 6, 9)
|
||||
);
|
||||
|
||||
INSERT INTO t1 VALUES (1, 1, "one", null);
|
||||
INSERT INTO t1 VALUES (2, 2, "two", null);
|
||||
INSERT INTO t1 VALUES (3, 3, "three", null);
|
||||
INSERT INTO t1 VALUES (5, 5, "five", null);
|
||||
INSERT INTO t1 VALUES (9, 9, "nine", null);
|
||||
|
||||
ALTER TABLE t1 PARTITION BY LIST(c1) (
|
||||
PARTITION custom_p3 VALUES IN (1, 4, 7),
|
||||
PARTITION custom_p4 VALUES IN (2, 5, 8, 3, 6, 9)
|
||||
);
|
||||
|
||||
ALTER TABLE t1 DROP PRIMARY KEY;
|
||||
ALTER TABLE t1 ADD PRIMARY KEY (`c1`, `c2`) COMMENT 'custom_p3_cfname=p3_cf;custom_p4_cfname=p4_cf';
|
||||
|
||||
set @@global.rocksdb_compact_cf = 'p3_cf';
|
||||
set @@global.rocksdb_compact_cf = 'p4_cf';
|
||||
|
||||
SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='p3_cf';
|
||||
SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='p4_cf';
|
||||
|
||||
DROP TABLE t1;
|
||||
|
||||
#
|
||||
# Create a table with CF-s per partition and use an empty qualifier name. Verify that no
|
||||
# new CF-s are created. This will also make sure that nothing gets added for
|
||||
# `custom_p2`.
|
||||
#
|
||||
CREATE TABLE t1 (
|
||||
c1 INT,
|
||||
c2 INT,
|
||||
name VARCHAR(25) NOT NULL,
|
||||
event DATE,
|
||||
PRIMARY KEY (`c1`, `c2`) COMMENT 'custom_p0_cfname=foo;custom_p1_cfname=;'
|
||||
) ENGINE=ROCKSDB
|
||||
PARTITION BY LIST(c1) (
|
||||
PARTITION custom_p0 VALUES IN (1, 4, 7),
|
||||
PARTITION custom_p1 VALUES IN (2, 5, 8),
|
||||
PARTITION custom_p2 VALUES IN (3, 6, 9)
|
||||
);
|
||||
|
||||
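# Not in the original test: a hedged verification sketch for the comment
# above, reusing the cfstats pattern from this file. custom_p0 still maps to
# 'foo' (already present from the earlier cases), while the empty qualifier on
# custom_p1 and the omitted custom_p2 should not register any new CF.
SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='foo';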
DROP TABLE t1;
|
||||
|
||||
#
|
||||
# Verify some basic partition related operations when using PARTITION BY LIST
|
||||
# COLUMNS on a VARBINARY column on a table with more complicated schema.
|
||||
#
|
||||
|
||||
#
|
||||
# Verify that creating the table without COMMENT actually works.
|
||||
#
|
||||
CREATE TABLE `t2` (
|
||||
`col1` bigint(20) NOT NULL,
|
||||
`col2` varbinary(64) NOT NULL,
|
||||
`col3` varbinary(256) NOT NULL,
|
||||
`col4` bigint(20) NOT NULL,
|
||||
`col5` mediumblob NOT NULL,
|
||||
PRIMARY KEY (`col1`,`col2`,`col3`)
|
||||
) ENGINE=ROCKSDB DEFAULT CHARSET=latin1
|
||||
PARTITION BY LIST COLUMNS (`col2`) (
|
||||
PARTITION custom_p0 VALUES IN (0x12345),
|
||||
PARTITION custom_p1 VALUES IN (0x23456),
|
||||
PARTITION custom_p2 VALUES IN (0x34567),
|
||||
PARTITION custom_p3 VALUES IN (0x45678),
|
||||
PARTITION custom_p4 VALUES IN (0x56789),
|
||||
PARTITION custom_p5 VALUES IN (0x6789A),
|
||||
PARTITION custom_p6 VALUES IN (0x789AB),
|
||||
PARTITION custom_p7 VALUES IN (0x89ABC)
|
||||
);
|
||||
|
||||
DROP TABLE t2;
|
||||
|
||||
#
|
||||
# Create the same table with two custom CF-s per partition as specified in the
|
||||
# COMMENT.
|
||||
#
|
||||
CREATE TABLE `t2` (
|
||||
`col1` bigint(20) NOT NULL,
|
||||
`col2` varbinary(64) NOT NULL,
|
||||
`col3` varbinary(256) NOT NULL,
|
||||
`col4` bigint(20) NOT NULL,
|
||||
`col5` mediumblob NOT NULL,
|
||||
PRIMARY KEY (`col1`,`col2`,`col3`) COMMENT 'custom_p0_cfname=my_cf0;custom_p1_cfname=my_cf1'
|
||||
) ENGINE=ROCKSDB DEFAULT CHARSET=latin1
|
||||
PARTITION BY LIST COLUMNS (`col2`) (
|
||||
PARTITION custom_p0 VALUES IN (0x12345),
|
||||
PARTITION custom_p1 VALUES IN (0x23456),
|
||||
PARTITION custom_p2 VALUES IN (0x34567),
|
||||
PARTITION custom_p3 VALUES IN (0x45678),
|
||||
PARTITION custom_p4 VALUES IN (0x56789),
|
||||
PARTITION custom_p5 VALUES IN (0x6789A),
|
||||
PARTITION custom_p6 VALUES IN (0x789AB),
|
||||
PARTITION custom_p7 VALUES IN (0x89ABC)
|
||||
);
|
||||
|
||||
# Verify that CF-s were created earlier.
|
||||
set @@global.rocksdb_compact_cf = 'my_cf0';
|
||||
set @@global.rocksdb_compact_cf = 'my_cf1';
|
||||
|
||||
SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='my_cf0';
|
||||
SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='my_cf1';
|
||||
|
||||
# Insert some random data.
|
||||
INSERT INTO t2 VALUES (100, 0x12345, 0x1, 1, 0x2);
|
||||
INSERT INTO t2 VALUES (200, 0x12345, 0x1, 1, 0x2);
|
||||
INSERT INTO t2 VALUES (300, 0x12345, 0x1, 1, 0x2);
|
||||
INSERT INTO t2 VALUES (100, 0x23456, 0x2, 1, 0x3);
|
||||
INSERT INTO t2 VALUES (100, 0x34567, 0x4, 1, 0x5);
|
||||
INSERT INTO t2 VALUES (400, 0x89ABC, 0x4, 1, 0x5);
|
||||
|
||||
# Verify it's there.
|
||||
SELECT col1, HEX(col2), HEX(col3), col4, HEX(col5) FROM t2;
|
||||
|
||||
# Verify it's being fetched from the right partition. This tests partitioning
|
||||
# functionality, but we want to make sure that by adding CF-s per partition we
|
||||
# don't regress anything.
|
||||
EXPLAIN PARTITIONS SELECT HEX(col2) FROM t2 where col2 = 0x12345;
|
||||
EXPLAIN PARTITIONS SELECT HEX(col2) FROM t2 where col2 = 0x23456;
|
||||
|
||||
# Delete the current PK and create a new one referencing different CF-s. We
|
||||
# need to verify that new CF-s will be created and no data will be lost in the
|
||||
# process.
|
||||
ALTER TABLE t2 DROP PRIMARY KEY;
|
||||
ALTER TABLE t2 ADD PRIMARY KEY (`col1`,`col2`,`col3`) COMMENT 'custom_p0_cfname=new_cf0;custom_p1_cfname=new_cf1';
|
||||
|
||||
# Verify that new CF-s are created as well.
|
||||
set @@global.rocksdb_compact_cf = 'new_cf0';
|
||||
set @@global.rocksdb_compact_cf = 'new_cf1';
|
||||
|
||||
SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='new_cf0';
|
||||
SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='new_cf1';
|
||||
|
||||
# Insert some more random data.
|
||||
INSERT INTO t2 VALUES (500, 0x12345, 0x5, 1, 0x2);
|
||||
INSERT INTO t2 VALUES (700, 0x23456, 0x7, 1, 0x3);
|
||||
|
||||
# Verify that partition mappings are still intact.
|
||||
EXPLAIN PARTITIONS SELECT HEX(col2) FROM t2 where col2 = 0x12345;
|
||||
EXPLAIN PARTITIONS SELECT HEX(col2) FROM t2 where col2 = 0x23456;
|
||||
|
||||
# Verify that no data is lost.
|
||||
SELECT col1, HEX(col2), HEX(col3), col4, HEX(col5) FROM t2;
|
||||
|
||||
DROP TABLE t2;
|
||||
|
||||
#
|
||||
# Create the same table with two custom CF-s per partition as specified in the
|
||||
# COMMENT. Use both the PK and SK when creating the table.
|
||||
#
|
||||
CREATE TABLE `t2` (
|
||||
`col1` bigint(20) NOT NULL,
|
||||
`col2` varbinary(64) NOT NULL,
|
||||
`col3` varbinary(256) NOT NULL,
|
||||
`col4` bigint(20) NOT NULL,
|
||||
`col5` mediumblob NOT NULL,
|
||||
PRIMARY KEY (`col1`,`col2`,`col3`) COMMENT 'custom_p0_cfname=test_cf0;custom_p1_cfname=test_cf1',
|
||||
KEY (`col2`, `col4`) COMMENT 'custom_p5_cfname=test_cf5'
|
||||
) ENGINE=ROCKSDB DEFAULT CHARSET=latin1
|
||||
PARTITION BY LIST COLUMNS (`col2`) (
|
||||
PARTITION custom_p0 VALUES IN (0x12345),
|
||||
PARTITION custom_p1 VALUES IN (0x23456),
|
||||
PARTITION custom_p2 VALUES IN (0x34567),
|
||||
PARTITION custom_p3 VALUES IN (0x45678),
|
||||
PARTITION custom_p4 VALUES IN (0x56789),
|
||||
PARTITION custom_p5 VALUES IN (0x6789A),
|
||||
PARTITION custom_p6 VALUES IN (0x789AB),
|
||||
PARTITION custom_p7 VALUES IN (0x89ABC)
|
||||
);
|
||||
|
||||
# Verify that CF-s were created for PK.
|
||||
SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='test_cf0';
|
||||
SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='test_cf1';
|
||||
|
||||
# Verify that CF-s were created for SK.
|
||||
SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='test_cf5';
|
||||
|
||||
# Insert some random data.
|
||||
INSERT INTO t2 VALUES (100, 0x12345, 0x1, 1, 0x2);
|
||||
INSERT INTO t2 VALUES (200, 0x12345, 0x1, 1, 0x2);
|
||||
INSERT INTO t2 VALUES (300, 0x12345, 0x1, 1, 0x2);
|
||||
INSERT INTO t2 VALUES (100, 0x23456, 0x2, 1, 0x3);
|
||||
INSERT INTO t2 VALUES (100, 0x34567, 0x4, 1, 0x5);
|
||||
INSERT INTO t2 VALUES (400, 0x89ABC, 0x4, 1, 0x5);
|
||||
INSERT INTO t2 VALUES (500, 0x6789A, 0x5, 1, 0x7);
|
||||
|
||||
# Basic verification that correct partition and key are used when searching.
|
||||
EXPLAIN PARTITIONS SELECT * FROM t2 WHERE col2 = 0x6789A AND col4 = 1;
|
||||
|
||||
# Remove the key.
|
||||
ALTER TABLE t2 DROP KEY `col2`;
|
||||
|
||||
# Add a new key and expect new CF to be created as well.
|
||||
ALTER TABLE t2 ADD KEY (`col3`, `col4`) COMMENT 'custom_p5_cfname=another_cf_for_p5';
|
||||
|
||||
# Verify that CF-s were created for SK.
|
||||
SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='another_cf_for_p5';
|
||||
|
||||
# Verify that correct partition and key are used when searching.
|
||||
EXPLAIN PARTITIONS SELECT * FROM t2 WHERE col3 = 0x4 AND col2 = 0x34567;
|
||||
|
||||
DROP TABLE t2;
|
||||
|
||||
#
|
||||
# Verify the same scenario as before, but with a UNIQUE KEY in addition to PK.
|
||||
#
|
||||
CREATE TABLE `t2` (
|
||||
`col1` bigint(20) NOT NULL,
|
||||
`col2` varbinary(64) NOT NULL,
|
||||
`col3` varbinary(256) NOT NULL,
|
||||
`col4` bigint(20) NOT NULL,
|
||||
`col5` mediumblob NOT NULL,
|
||||
PRIMARY KEY (`col1`,`col2`,`col3`) COMMENT 'custom_p0_cfname=test_cf0;custom_p1_cfname=test_cf1',
|
||||
UNIQUE KEY (`col2`, `col4`) COMMENT 'custom_p5_cfname=unique_test_cf5'
|
||||
) ENGINE=ROCKSDB DEFAULT CHARSET=latin1
|
||||
PARTITION BY LIST COLUMNS (`col2`) (
|
||||
PARTITION custom_p0 VALUES IN (0x12345),
|
||||
PARTITION custom_p1 VALUES IN (0x23456),
|
||||
PARTITION custom_p2 VALUES IN (0x34567),
|
||||
PARTITION custom_p3 VALUES IN (0x45678),
|
||||
PARTITION custom_p4 VALUES IN (0x56789),
|
||||
PARTITION custom_p5 VALUES IN (0x6789A),
|
||||
PARTITION custom_p6 VALUES IN (0x789AB),
|
||||
PARTITION custom_p7 VALUES IN (0x89ABC)
|
||||
);
|
||||
|
||||
# Verify that CF-s were created for SK.
|
||||
SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='unique_test_cf5';
|
||||
|
||||
INSERT INTO t2 VALUES (100, 0x12345, 0x1, 1, 0x2);
|
||||
|
||||
--error ER_DUP_ENTRY
|
||||
INSERT INTO t2 VALUES (200, 0x12345, 0x1, 1, 0x2);
|
||||
|
||||
--error ER_DUP_ENTRY
|
||||
INSERT INTO t2 VALUES (300, 0x12345, 0x1, 1, 0x2);
|
||||
|
||||
INSERT INTO t2 VALUES (100, 0x23456, 0x2, 1, 0x3);
|
||||
INSERT INTO t2 VALUES (100, 0x34567, 0x4, 1, 0x5);
|
||||
INSERT INTO t2 VALUES (400, 0x89ABC, 0x4, 1, 0x5);
|
||||
INSERT INTO t2 VALUES (500, 0x6789A, 0x5, 1, 0x7);
|
||||
|
||||
DROP TABLE t2;
|
||||
|
||||
#
|
||||
# Verify that a partitioned and a non-partitioned table can share a CF.
|
||||
#
|
||||
CREATE TABLE t1 (
|
||||
`a` int,
|
||||
PRIMARY KEY (a) COMMENT "sharedcf"
|
||||
) ENGINE=ROCKSDB;
|
||||
|
||||
SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='sharedcf';
|
||||
|
||||
#
|
||||
# We expect this to succeed.
|
||||
#
|
||||
CREATE TABLE t2 (
|
||||
`a` INT,
|
||||
`b` DATE,
|
||||
`c` VARCHAR(42),
|
||||
PRIMARY KEY (`a`) COMMENT "custom_p0_cfname=sharedcf;custom_p2_cfname=notsharedcf"
|
||||
) ENGINE=ROCKSDB
|
||||
PARTITION BY LIST(`a`) (
|
||||
PARTITION custom_p0 VALUES IN (1, 4, 7),
|
||||
PARTITION custom_p1 VALUES IN (2, 5, 8),
|
||||
PARTITION custom_p2 VALUES IN (3, 6, 9)
|
||||
);
|
||||
|
||||
SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='notsharedcf';
|
||||
|
||||
DROP TABLE IF EXISTS t1;
|
||||
DROP TABLE IF EXISTS t2;
|
@@ -16,6 +16,10 @@ DROP TABLE IF EXISTS t4;
|
||||
CREATE TABLE t1 (i INT, PRIMARY KEY (i) COMMENT 'cf_t1') ENGINE = ROCKSDB;
|
||||
CREATE TABLE t2 (j INT, PRIMARY KEY (j) COMMENT 'rev:cf_t2') ENGINE = ROCKSDB;
|
||||
CREATE TABLE t3 (k INT, PRIMARY KEY (k) COMMENT 'cf_t1') ENGINE = ROCKSDB;
|
||||
|
||||
# With partition based column family creation we now expect all the partitions
|
||||
# to belong to the default column family because no mapping was specified in
|
||||
# this case.
|
||||
CREATE TABLE t4 (l INT, PRIMARY KEY (l) COMMENT 'cf_t4') ENGINE = ROCKSDB
|
||||
PARTITION BY KEY(l) PARTITIONS 4;
|
||||
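# Not in the original change: a hedged sanity check, mirroring the cfstats
# queries used elsewhere in this commit, that the 'cf_t4' comment on the
# partitioned table above did not create a column family of its own (an empty
# result is expected, since no per-partition mapping was given).
SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='cf_t4';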
|
||||
|
@@ -1,37 +1,41 @@
|
||||
--source include/have_rocksdb.inc
|
||||
|
||||
#
|
||||
# Check whether DATA DIRECTORY and INDEX DIRECTORY
|
||||
# are supported in CREATE and ALTER TABLE
|
||||
# Check that when either DATA DIRECTORY or INDEX DIRECTORY is specified
|
||||
# then MyRocks returns an appropriate error. We don't support this
|
||||
# functionality and therefore shouldn't just silently accept the values.
|
||||
#
|
||||
# Note: the test does not check whether the options
|
||||
# have any real effect on the table, only
|
||||
# that they are accepted
|
||||
# (and apparently ignored)
|
||||
#
|
||||
|
||||
--let $data_dir = $MYSQLTEST_VARDIR/storage_engine_data_dir/
|
||||
--let $index_dir = $MYSQLTEST_VARDIR/storage_engine_index_dir/
|
||||
--mkdir $data_dir
|
||||
--mkdir $index_dir
|
||||
|
||||
--disable_warnings
|
||||
DROP TABLE IF EXISTS t1;
|
||||
--enable_warnings
|
||||
|
||||
--replace_result $data_dir <DATA_DIR> $index_dir <INDEX_DIR>
|
||||
eval CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb DATA DIRECTORY = '$data_dir' INDEX DIRECTORY = '$index_dir';
|
||||
--replace_result $data_dir <DATA_DIR> $index_dir <INDEX_DIR>
|
||||
SHOW CREATE TABLE t1;
|
||||
--error 1296
|
||||
eval CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb DATA DIRECTORY = '/foo/bar/data';
|
||||
|
||||
--replace_result $data_dir <DATA_DIR> $index_dir <INDEX_DIR>
|
||||
eval ALTER TABLE t1 INDEX DIRECTORY = '$data_dir';
|
||||
--replace_result $data_dir <DATA_DIR> $index_dir <INDEX_DIR>
|
||||
SHOW CREATE TABLE t1;
|
||||
--error 1296
|
||||
eval CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb INDEX DIRECTORY = '/foo/bar/index';
|
||||
|
||||
DROP TABLE t1;
|
||||
|
||||
--rmdir $data_dir
|
||||
--rmdir $index_dir
|
||||
#
|
||||
# Verify that we'll get the same error codes when using the partitions.
|
||||
#
|
||||
|
||||
--error 1296
|
||||
CREATE TABLE t1 (id INT NOT NULL PRIMARY KEY) ENGINE=rocksdb PARTITION BY RANGE (id)
|
||||
(
|
||||
PARTITION P0 VALUES LESS THAN (1000)
|
||||
DATA DIRECTORY = '/foo/bar/data/',
|
||||
PARTITION P1 VALUES LESS THAN (2000)
|
||||
DATA DIRECTORY = '/foo/bar/data/',
|
||||
PARTITION P2 VALUES LESS THAN (MAXVALUE)
|
||||
);
|
||||
|
||||
--error 1296
|
||||
CREATE TABLE t1 (id int not null primary key) ENGINE=rocksdb PARTITION BY RANGE (id)
|
||||
(
|
||||
PARTITION P0 VALUES LESS THAN (1000)
|
||||
INDEX DIRECTORY = '/foo/bar/data/',
|
||||
PARTITION P1 VALUES LESS THAN (2000)
|
||||
INDEX DIRECTORY = '/foo/bar/data/',
|
||||
PARTITION P2 VALUES LESS THAN (MAXVALUE)
|
||||
);
|
||||
|
@@ -6,10 +6,9 @@ select * from t1 where id=5;
|
||||
select * from t1 where value=5;
|
||||
select value from t1 where value=5;
|
||||
select * from t1 where value2=5;
|
||||
select * from t1 where id < 3;
|
||||
select * from t1 where value < 3;
|
||||
select value from t1 where value < 3;
|
||||
select * from t1 where value2 < 3;
|
||||
select * from t1;
|
||||
select value from t1;
|
||||
|
||||
select * from t1 where id < 3 order by id;
|
||||
select * from t1 where value < 3 order by id;
|
||||
select value from t1 where value < 3 order by id;
|
||||
select * from t1 where value2 < 3 order by id;
|
||||
select * from t1 order by id;
|
||||
select value from t1 order by id;
|
||||
|
@@ -143,3 +143,29 @@ disconnect con1;
|
||||
disconnect con2;
|
||||
disconnect con3;
|
||||
drop table t1, t2;
|
||||
|
||||
# skip_unique checks should skip checks only for tables that don't have
|
||||
# secondary indexes
|
||||
connection default;
|
||||
--disable_warnings
|
||||
drop table if exists t1,t2,t3;
|
||||
--enable_warnings
|
||||
|
||||
# table with PK only
|
||||
create table t1 (id int, value int, primary key (id)) engine=rocksdb;
|
||||
# table with PK and SK
|
||||
create table t2 (id int, id2 int, value int, primary key (id), unique key (id2)) engine=rocksdb;
|
||||
# table with hidden PK
|
||||
create table t3 (id int, value int) engine=rocksdb;
|
||||
|
||||
SET @old_val = @@session.unique_checks;
|
||||
set @@session.unique_checks = FALSE;
|
||||
|
||||
insert into t1 values (1, 1), (1, 2);
|
||||
--error ER_DUP_ENTRY
|
||||
insert into t2 values (1, 1, 1), (1, 2, 1);
|
||||
insert into t3 values (1, 1), (1, 1);
|
||||
|
||||
set @@session.unique_checks = @old_val;
|
||||
# cleanup
|
||||
drop table t1, t2, t3;
|
||||
|
@@ -5,7 +5,7 @@ SET GLOBAL rocksdb_write_ignore_missing_column_families=true;
|
||||
|
||||
create table aaa (id int primary key, i int) engine rocksdb;
|
||||
|
||||
SET LOCAL rocksdb_write_sync=off;
|
||||
SET LOCAL rocksdb_flush_log_at_trx_commit=0;
|
||||
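# The 30 second pause below presumably lets any already-queued WAL syncs
# finish before the rocksdb_wal_synced counter is sampled.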
--exec sleep 30
|
||||
select variable_value into @a from information_schema.global_status where variable_name='rocksdb_wal_synced';
|
||||
insert aaa(id, i) values(1,1);
|
||||
@@ -15,7 +15,7 @@ select variable_value-@a from information_schema.global_status where variable_na
|
||||
insert aaa(id, i) values(3,1);
|
||||
select variable_value-@a from information_schema.global_status where variable_name='rocksdb_wal_synced';
|
||||
|
||||
SET LOCAL rocksdb_write_sync=1;
|
||||
SET LOCAL rocksdb_flush_log_at_trx_commit=1;
|
||||
insert aaa(id, i) values(4,1);
|
||||
select variable_value-@a from information_schema.global_status where variable_name='rocksdb_wal_synced';
|
||||
insert aaa(id, i) values(5,1);
|
||||
@@ -24,7 +24,7 @@ insert aaa(id, i) values(6,1);
|
||||
select variable_value-@a from information_schema.global_status where variable_name='rocksdb_wal_synced';
|
||||
|
||||
SET GLOBAL rocksdb_background_sync=on;
|
||||
SET LOCAL rocksdb_write_sync=off;
|
||||
SET LOCAL rocksdb_flush_log_at_trx_commit=0;
|
||||
insert aaa(id, i) values(7,1);
|
||||
|
||||
let $status_var=rocksdb_wal_synced;
|
||||
@@ -35,7 +35,7 @@ truncate table aaa;
|
||||
|
||||
# Cleanup
|
||||
drop table aaa;
|
||||
SET GLOBAL rocksdb_write_sync=off;
|
||||
SET GLOBAL rocksdb_flush_log_at_trx_commit=1;
|
||||
SET GLOBAL rocksdb_write_disable_wal=false;
|
||||
SET GLOBAL rocksdb_write_ignore_missing_column_families=false;
|
||||
SET GLOBAL rocksdb_background_sync=off;
|
||||
|
@@ -3,9 +3,11 @@
|
||||
[mysqld.1]
|
||||
log_slave_updates
|
||||
rocksdb_enable_2pc=OFF
|
||||
rocksdb_wal_recovery_mode=2
|
||||
|
||||
[mysqld.2]
|
||||
relay_log_recovery=1
|
||||
relay_log_info_repository=TABLE
|
||||
log_slave_updates
|
||||
rocksdb_enable_2pc=OFF
|
||||
rocksdb_wal_recovery_mode=2
|
||||
|
@@ -1 +1 @@
|
||||
--gtid_mode=ON --enforce_gtid_consistency --log_slave_updates
|
||||
--gtid_mode=ON --enforce_gtid_consistency --log_slave_updates --rocksdb_enable_2pc=OFF
|
||||
|
@@ -1,2 +1,2 @@
|
||||
--gtid_mode=ON --enforce_gtid_consistency --log_slave_updates
|
||||
--gtid_mode=ON --enforce_gtid_consistency --log_slave_updates --rocksdb_enable_2pc=OFF
|
||||
--sync_binlog=1000 --relay_log_recovery=1
|
||||
|
@@ -5,6 +5,7 @@ log_slave_updates
|
||||
gtid_mode=ON
|
||||
enforce_gtid_consistency=ON
|
||||
rocksdb_enable_2pc=OFF
|
||||
rocksdb_wal_recovery_mode=2
|
||||
|
||||
[mysqld.2]
|
||||
sync_relay_log_info=100
|
||||
@@ -14,3 +15,4 @@ log_slave_updates
|
||||
gtid_mode=ON
|
||||
enforce_gtid_consistency=ON
|
||||
rocksdb_enable_2pc=OFF
|
||||
rocksdb_wal_recovery_mode=2
|
||||
|
@@ -1 +1 @@
|
||||
--gtid_mode=ON --enforce_gtid_consistency --log_bin --log_slave_updates --rocksdb_write_sync=ON --rocksdb_write_disable_wal=OFF
|
||||
--gtid_mode=ON --enforce_gtid_consistency --log_bin --log_slave_updates --rocksdb_flush_log_at_trx_commit=1 --rocksdb_write_disable_wal=OFF
|
||||
|
@@ -0,0 +1,100 @@
|
||||
CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam;
|
||||
INSERT INTO valid_values VALUES(1);
|
||||
INSERT INTO valid_values VALUES(0);
|
||||
INSERT INTO valid_values VALUES('on');
|
||||
CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
|
||||
INSERT INTO invalid_values VALUES('\'aaa\'');
|
||||
INSERT INTO invalid_values VALUES('\'bbb\'');
|
||||
SET @start_global_value = @@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY;
|
||||
SELECT @start_global_value;
|
||||
@start_global_value
|
||||
0
|
||||
SET @start_session_value = @@session.ROCKSDB_BLIND_DELETE_PRIMARY_KEY;
|
||||
SELECT @start_session_value;
|
||||
@start_session_value
|
||||
0
|
||||
'# Setting to valid values in global scope#'
|
||||
"Trying to set variable @@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY to 1"
|
||||
SET @@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY = 1;
|
||||
SELECT @@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY;
|
||||
@@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY
|
||||
1
|
||||
"Setting the global scope variable back to default"
|
||||
SET @@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY = DEFAULT;
|
||||
SELECT @@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY;
|
||||
@@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY
|
||||
0
|
||||
"Trying to set variable @@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY to 0"
|
||||
SET @@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY = 0;
|
||||
SELECT @@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY;
|
||||
@@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY
|
||||
0
|
||||
"Setting the global scope variable back to default"
|
||||
SET @@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY = DEFAULT;
|
||||
SELECT @@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY;
|
||||
@@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY
|
||||
0
|
||||
"Trying to set variable @@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY to on"
|
||||
SET @@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY = on;
|
||||
SELECT @@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY;
|
||||
@@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY
|
||||
1
|
||||
"Setting the global scope variable back to default"
|
||||
SET @@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY = DEFAULT;
|
||||
SELECT @@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY;
|
||||
@@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY
|
||||
0
|
||||
'# Setting to valid values in session scope#'
|
||||
"Trying to set variable @@session.ROCKSDB_BLIND_DELETE_PRIMARY_KEY to 1"
|
||||
SET @@session.ROCKSDB_BLIND_DELETE_PRIMARY_KEY = 1;
|
||||
SELECT @@session.ROCKSDB_BLIND_DELETE_PRIMARY_KEY;
|
||||
@@session.ROCKSDB_BLIND_DELETE_PRIMARY_KEY
|
||||
1
|
||||
"Setting the session scope variable back to default"
|
||||
SET @@session.ROCKSDB_BLIND_DELETE_PRIMARY_KEY = DEFAULT;
|
||||
SELECT @@session.ROCKSDB_BLIND_DELETE_PRIMARY_KEY;
|
||||
@@session.ROCKSDB_BLIND_DELETE_PRIMARY_KEY
|
||||
0
|
||||
"Trying to set variable @@session.ROCKSDB_BLIND_DELETE_PRIMARY_KEY to 0"
|
||||
SET @@session.ROCKSDB_BLIND_DELETE_PRIMARY_KEY = 0;
|
||||
SELECT @@session.ROCKSDB_BLIND_DELETE_PRIMARY_KEY;
|
||||
@@session.ROCKSDB_BLIND_DELETE_PRIMARY_KEY
|
||||
0
|
||||
"Setting the session scope variable back to default"
|
||||
SET @@session.ROCKSDB_BLIND_DELETE_PRIMARY_KEY = DEFAULT;
|
||||
SELECT @@session.ROCKSDB_BLIND_DELETE_PRIMARY_KEY;
|
||||
@@session.ROCKSDB_BLIND_DELETE_PRIMARY_KEY
|
||||
0
|
||||
"Trying to set variable @@session.ROCKSDB_BLIND_DELETE_PRIMARY_KEY to on"
|
||||
SET @@session.ROCKSDB_BLIND_DELETE_PRIMARY_KEY = on;
|
||||
SELECT @@session.ROCKSDB_BLIND_DELETE_PRIMARY_KEY;
|
||||
@@session.ROCKSDB_BLIND_DELETE_PRIMARY_KEY
|
||||
1
|
||||
"Setting the session scope variable back to default"
|
||||
SET @@session.ROCKSDB_BLIND_DELETE_PRIMARY_KEY = DEFAULT;
|
||||
SELECT @@session.ROCKSDB_BLIND_DELETE_PRIMARY_KEY;
|
||||
@@session.ROCKSDB_BLIND_DELETE_PRIMARY_KEY
|
||||
0
|
||||
'# Testing with invalid values in global scope #'
|
||||
"Trying to set variable @@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY to 'aaa'"
|
||||
SET @@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY = 'aaa';
|
||||
Got one of the listed errors
|
||||
SELECT @@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY;
|
||||
@@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY
|
||||
0
|
||||
"Trying to set variable @@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY to 'bbb'"
|
||||
SET @@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY = 'bbb';
|
||||
Got one of the listed errors
|
||||
SELECT @@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY;
|
||||
@@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY
|
||||
0
|
||||
SET @@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY = @start_global_value;
|
||||
SELECT @@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY;
|
||||
@@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY
|
||||
0
|
||||
SET @@session.ROCKSDB_BLIND_DELETE_PRIMARY_KEY = @start_session_value;
|
||||
SELECT @@session.ROCKSDB_BLIND_DELETE_PRIMARY_KEY;
|
||||
@@session.ROCKSDB_BLIND_DELETE_PRIMARY_KEY
|
||||
0
|
||||
DROP TABLE valid_values;
|
||||
DROP TABLE invalid_values;
|
@@ -1,3 +1,4 @@
|
||||
call mtr.add_suppression(" Column family '[a-z]*' not found.");
|
||||
CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam;
|
||||
INSERT INTO valid_values VALUES('abc');
|
||||
INSERT INTO valid_values VALUES('def');
|
||||
|
@@ -0,0 +1,85 @@
|
||||
CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam;
|
||||
INSERT INTO valid_values VALUES(100);
|
||||
INSERT INTO valid_values VALUES(1);
|
||||
INSERT INTO valid_values VALUES(0);
|
||||
CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
|
||||
INSERT INTO invalid_values VALUES('\'aaa\'');
|
||||
INSERT INTO invalid_values VALUES('\'bbb\'');
|
||||
INSERT INTO invalid_values VALUES('\'-1\'');
|
||||
INSERT INTO invalid_values VALUES('\'101\'');
|
||||
INSERT INTO invalid_values VALUES('\'484436\'');
|
||||
SET @start_global_value = @@global.ROCKSDB_DELAYED_WRITE_RATE;
|
||||
SELECT @start_global_value;
|
||||
@start_global_value
|
||||
16777216
|
||||
'# Setting to valid values in global scope#'
|
||||
"Trying to set variable @@global.ROCKSDB_DELAYED_WRITE_RATE to 100"
|
||||
SET @@global.ROCKSDB_DELAYED_WRITE_RATE = 100;
|
||||
SELECT @@global.ROCKSDB_DELAYED_WRITE_RATE;
|
||||
@@global.ROCKSDB_DELAYED_WRITE_RATE
|
||||
100
|
||||
"Setting the global scope variable back to default"
|
||||
SET @@global.ROCKSDB_DELAYED_WRITE_RATE = DEFAULT;
|
||||
SELECT @@global.ROCKSDB_DELAYED_WRITE_RATE;
|
||||
@@global.ROCKSDB_DELAYED_WRITE_RATE
|
||||
16777216
|
||||
"Trying to set variable @@global.ROCKSDB_DELAYED_WRITE_RATE to 1"
|
||||
SET @@global.ROCKSDB_DELAYED_WRITE_RATE = 1;
|
||||
SELECT @@global.ROCKSDB_DELAYED_WRITE_RATE;
|
||||
@@global.ROCKSDB_DELAYED_WRITE_RATE
|
||||
1
|
||||
"Setting the global scope variable back to default"
|
||||
SET @@global.ROCKSDB_DELAYED_WRITE_RATE = DEFAULT;
|
||||
SELECT @@global.ROCKSDB_DELAYED_WRITE_RATE;
|
||||
@@global.ROCKSDB_DELAYED_WRITE_RATE
|
||||
16777216
|
||||
"Trying to set variable @@global.ROCKSDB_DELAYED_WRITE_RATE to 0"
|
||||
SET @@global.ROCKSDB_DELAYED_WRITE_RATE = 0;
|
||||
SELECT @@global.ROCKSDB_DELAYED_WRITE_RATE;
|
||||
@@global.ROCKSDB_DELAYED_WRITE_RATE
|
||||
0
|
||||
"Setting the global scope variable back to default"
|
||||
SET @@global.ROCKSDB_DELAYED_WRITE_RATE = DEFAULT;
|
||||
SELECT @@global.ROCKSDB_DELAYED_WRITE_RATE;
|
||||
@@global.ROCKSDB_DELAYED_WRITE_RATE
|
||||
16777216
|
||||
"Trying to set variable @@session.ROCKSDB_DELAYED_WRITE_RATE to 444. It should fail because it is not session."
|
||||
SET @@session.ROCKSDB_DELAYED_WRITE_RATE = 444;
|
||||
ERROR HY000: Variable 'rocksdb_delayed_write_rate' is a GLOBAL variable and should be set with SET GLOBAL
|
||||
'# Testing with invalid values in global scope #'
|
||||
"Trying to set variable @@global.ROCKSDB_DELAYED_WRITE_RATE to 'aaa'"
|
||||
SET @@global.ROCKSDB_DELAYED_WRITE_RATE = 'aaa';
|
||||
Got one of the listed errors
|
||||
SELECT @@global.ROCKSDB_DELAYED_WRITE_RATE;
|
||||
@@global.ROCKSDB_DELAYED_WRITE_RATE
|
||||
16777216
|
||||
"Trying to set variable @@global.ROCKSDB_DELAYED_WRITE_RATE to 'bbb'"
|
||||
SET @@global.ROCKSDB_DELAYED_WRITE_RATE = 'bbb';
|
||||
Got one of the listed errors
|
||||
SELECT @@global.ROCKSDB_DELAYED_WRITE_RATE;
|
||||
@@global.ROCKSDB_DELAYED_WRITE_RATE
|
||||
16777216
|
||||
"Trying to set variable @@global.ROCKSDB_DELAYED_WRITE_RATE to '-1'"
|
||||
SET @@global.ROCKSDB_DELAYED_WRITE_RATE = '-1';
|
||||
Got one of the listed errors
|
||||
SELECT @@global.ROCKSDB_DELAYED_WRITE_RATE;
|
||||
@@global.ROCKSDB_DELAYED_WRITE_RATE
|
||||
16777216
|
||||
"Trying to set variable @@global.ROCKSDB_DELAYED_WRITE_RATE to '101'"
|
||||
SET @@global.ROCKSDB_DELAYED_WRITE_RATE = '101';
|
||||
Got one of the listed errors
|
||||
SELECT @@global.ROCKSDB_DELAYED_WRITE_RATE;
|
||||
@@global.ROCKSDB_DELAYED_WRITE_RATE
|
||||
16777216
|
||||
"Trying to set variable @@global.ROCKSDB_DELAYED_WRITE_RATE to '484436'"
|
||||
SET @@global.ROCKSDB_DELAYED_WRITE_RATE = '484436';
|
||||
Got one of the listed errors
|
||||
SELECT @@global.ROCKSDB_DELAYED_WRITE_RATE;
|
||||
@@global.ROCKSDB_DELAYED_WRITE_RATE
|
||||
16777216
|
||||
SET @@global.ROCKSDB_DELAYED_WRITE_RATE = @start_global_value;
|
||||
SELECT @@global.ROCKSDB_DELAYED_WRITE_RATE;
|
||||
@@global.ROCKSDB_DELAYED_WRITE_RATE
|
||||
16777216
|
||||
DROP TABLE valid_values;
|
||||
DROP TABLE invalid_values;
|
@@ -1,7 +0,0 @@
|
||||
SET @start_global_value = @@global.ROCKSDB_DISABLEDATASYNC;
|
||||
SELECT @start_global_value;
|
||||
@start_global_value
|
||||
0
|
||||
"Trying to set variable @@global.ROCKSDB_DISABLEDATASYNC to 444. It should fail because it is readonly."
|
||||
SET @@global.ROCKSDB_DISABLEDATASYNC = 444;
|
||||
ERROR HY000: Variable 'rocksdb_disabledatasync' is a read only variable
|
@@ -0,0 +1,93 @@
|
||||
CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam;
|
||||
INSERT INTO valid_values VALUES(2);
|
||||
INSERT INTO valid_values VALUES(1);
|
||||
INSERT INTO valid_values VALUES(0);
|
||||
CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
|
||||
INSERT INTO invalid_values VALUES('\'aaa\'');
|
||||
SET @start_global_value = @@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT;
|
||||
SELECT @start_global_value;
|
||||
@start_global_value
|
||||
1
|
||||
SET @start_session_value = @@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT;
|
||||
SELECT @start_session_value;
|
||||
@start_session_value
|
||||
1
|
||||
'# Setting to valid values in global scope#'
|
||||
"Trying to set variable @@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT to 2"
|
||||
SET @@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT = 2;
|
||||
SELECT @@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT;
|
||||
@@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT
|
||||
2
|
||||
"Setting the global scope variable back to default"
|
||||
SET @@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT = DEFAULT;
|
||||
SELECT @@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT;
|
||||
@@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT
|
||||
1
|
||||
"Trying to set variable @@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT to 1"
|
||||
SET @@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT = 1;
|
||||
SELECT @@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT;
|
||||
@@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT
|
||||
1
|
||||
"Setting the global scope variable back to default"
|
||||
SET @@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT = DEFAULT;
|
||||
SELECT @@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT;
|
||||
@@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT
|
||||
1
|
||||
"Trying to set variable @@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT to 0"
|
||||
SET @@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT = 0;
|
||||
SELECT @@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT;
|
||||
@@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT
|
||||
0
|
||||
"Setting the global scope variable back to default"
|
||||
SET @@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT = DEFAULT;
|
||||
SELECT @@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT;
|
||||
@@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT
|
||||
1
|
||||
'# Setting to valid values in session scope#'
|
||||
"Trying to set variable @@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT to 2"
|
||||
SET @@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT = 2;
|
||||
SELECT @@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT;
|
||||
@@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT
|
||||
2
|
||||
"Setting the session scope variable back to default"
|
||||
SET @@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT = DEFAULT;
|
||||
SELECT @@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT;
|
||||
@@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT
|
||||
1
|
||||
"Trying to set variable @@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT to 1"
|
||||
SET @@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT = 1;
|
||||
SELECT @@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT;
|
||||
@@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT
|
||||
1
|
||||
"Setting the session scope variable back to default"
|
||||
SET @@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT = DEFAULT;
|
||||
SELECT @@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT;
|
||||
@@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT
|
||||
1
|
||||
"Trying to set variable @@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT to 0"
|
||||
SET @@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT = 0;
|
||||
SELECT @@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT;
|
||||
@@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT
|
||||
0
|
||||
"Setting the session scope variable back to default"
|
||||
SET @@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT = DEFAULT;
|
||||
SELECT @@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT;
|
||||
@@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT
|
||||
1
|
||||
'# Testing with invalid values in global scope #'
|
||||
"Trying to set variable @@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT to 'aaa'"
|
||||
SET @@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT = 'aaa';
|
||||
Got one of the listed errors
|
||||
SELECT @@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT;
|
||||
@@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT
|
||||
1
|
||||
SET @@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT = @start_global_value;
|
||||
SELECT @@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT;
|
||||
@@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT
|
||||
1
|
||||
SET @@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT = @start_session_value;
|
||||
SELECT @@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT;
|
||||
@@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT
|
||||
1
|
||||
DROP TABLE valid_values;
|
||||
DROP TABLE invalid_values;
|
@@ -21,7 +21,7 @@ Table Op Msg_type Msg_text
|
||||
test.t1 analyze status OK
|
||||
SHOW INDEXES FROM t1;
|
||||
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
|
||||
t1 0 PRIMARY 1 a A 0 NULL NULL LSMTREE
|
||||
t1 0 PRIMARY 1 a A 3 NULL NULL LSMTREE
|
||||
set session rocksdb_flush_memtable_on_analyze=on;
|
||||
ANALYZE TABLE t1;
|
||||
Table Op Msg_type Msg_text
|
||||
@@ -48,11 +48,11 @@ a b
|
||||
3 3
|
||||
SHOW TABLE STATUS LIKE 't1';
|
||||
Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment
|
||||
t1 ROCKSDB 10 Fixed 0 0 69 0 0 0 4 NULL NULL NULL latin1_swedish_ci NULL
|
||||
t1 ROCKSDB 10 Fixed # # 69 0 0 0 4 NULL NULL NULL latin1_swedish_ci NULL
|
||||
ANALYZE TABLE t1;
|
||||
Table Op Msg_type Msg_text
|
||||
test.t1 analyze status OK
|
||||
SHOW TABLE STATUS LIKE 't1';
|
||||
Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment
|
||||
t1 ROCKSDB 10 Fixed 3 8 24 0 0 0 4 NULL NULL NULL latin1_swedish_ci NULL
|
||||
t1 ROCKSDB 10 Fixed # # 24 0 0 0 4 NULL NULL NULL latin1_swedish_ci NULL
|
||||
DROP TABLE t1;
|
||||
|
@@ -0,0 +1,15 @@
|
||||
DROP TABLE IF EXISTS t;
|
||||
CREATE TABLE t (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb;
|
||||
INSERT INTO t (a,b) VALUES (1,'bar'),(2,'foo');
|
||||
SET @ORIG_PAUSE_BACKGROUND_WORK = @@rocksdb_force_compute_memtable_stats;
|
||||
set global rocksdb_force_flush_memtable_now = true;
|
||||
INSERT INTO t (a,b) VALUES (3,'dead'),(4,'beef'),(5,'a'),(6,'bbb'),(7,'c'),(8,'d');
|
||||
set global rocksdb_force_compute_memtable_stats=0;
|
||||
SELECT TABLE_ROWS INTO @ROWS_EXCLUDE_MEMTABLE FROM information_schema.TABLES WHERE table_name = 't';
|
||||
set global rocksdb_force_compute_memtable_stats=1;
|
||||
SELECT TABLE_ROWS INTO @ROWS_INCLUDE_MEMTABLE FROM information_schema.TABLES WHERE table_name = 't';
|
||||
select case when @ROWS_INCLUDE_MEMTABLE-@ROWS_EXCLUDE_MEMTABLE > 0 then 'true' else 'false' end;
|
||||
case when @ROWS_INCLUDE_MEMTABLE-@ROWS_EXCLUDE_MEMTABLE > 0 then 'true' else 'false' end
|
||||
true
|
||||
DROP TABLE t;
|
||||
set global rocksdb_force_compute_memtable_stats = @ORIG_PAUSE_BACKGROUND_WORK;
|
@@ -0,0 +1,100 @@
|
||||
CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam;
|
||||
INSERT INTO valid_values VALUES(1);
|
||||
INSERT INTO valid_values VALUES(0);
|
||||
INSERT INTO valid_values VALUES('on');
|
||||
CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
|
||||
INSERT INTO invalid_values VALUES('\'aaa\'');
|
||||
INSERT INTO invalid_values VALUES('\'bbb\'');
|
||||
SET @start_global_value = @@global.ROCKSDB_MASTER_SKIP_TX_API;
|
||||
SELECT @start_global_value;
|
||||
@start_global_value
|
||||
0
|
||||
SET @start_session_value = @@session.ROCKSDB_MASTER_SKIP_TX_API;
|
||||
SELECT @start_session_value;
|
||||
@start_session_value
|
||||
0
|
||||
'# Setting to valid values in global scope#'
|
||||
"Trying to set variable @@global.ROCKSDB_MASTER_SKIP_TX_API to 1"
|
||||
SET @@global.ROCKSDB_MASTER_SKIP_TX_API = 1;
|
||||
SELECT @@global.ROCKSDB_MASTER_SKIP_TX_API;
|
||||
@@global.ROCKSDB_MASTER_SKIP_TX_API
|
||||
1
|
||||
"Setting the global scope variable back to default"
|
||||
SET @@global.ROCKSDB_MASTER_SKIP_TX_API = DEFAULT;
|
||||
SELECT @@global.ROCKSDB_MASTER_SKIP_TX_API;
|
||||
@@global.ROCKSDB_MASTER_SKIP_TX_API
|
||||
0
|
||||
"Trying to set variable @@global.ROCKSDB_MASTER_SKIP_TX_API to 0"
|
||||
SET @@global.ROCKSDB_MASTER_SKIP_TX_API = 0;
|
||||
SELECT @@global.ROCKSDB_MASTER_SKIP_TX_API;
|
||||
@@global.ROCKSDB_MASTER_SKIP_TX_API
|
||||
0
|
||||
"Setting the global scope variable back to default"
|
||||
SET @@global.ROCKSDB_MASTER_SKIP_TX_API = DEFAULT;
|
||||
SELECT @@global.ROCKSDB_MASTER_SKIP_TX_API;
|
||||
@@global.ROCKSDB_MASTER_SKIP_TX_API
|
||||
0
|
||||
"Trying to set variable @@global.ROCKSDB_MASTER_SKIP_TX_API to on"
|
||||
SET @@global.ROCKSDB_MASTER_SKIP_TX_API = on;
|
||||
SELECT @@global.ROCKSDB_MASTER_SKIP_TX_API;
|
||||
@@global.ROCKSDB_MASTER_SKIP_TX_API
|
||||
1
|
||||
"Setting the global scope variable back to default"
|
||||
SET @@global.ROCKSDB_MASTER_SKIP_TX_API = DEFAULT;
|
||||
SELECT @@global.ROCKSDB_MASTER_SKIP_TX_API;
|
||||
@@global.ROCKSDB_MASTER_SKIP_TX_API
|
||||
0
|
||||
'# Setting to valid values in session scope#'
|
||||
"Trying to set variable @@session.ROCKSDB_MASTER_SKIP_TX_API to 1"
|
||||
SET @@session.ROCKSDB_MASTER_SKIP_TX_API = 1;
|
||||
SELECT @@session.ROCKSDB_MASTER_SKIP_TX_API;
|
||||
@@session.ROCKSDB_MASTER_SKIP_TX_API
|
||||
1
|
||||
"Setting the session scope variable back to default"
|
||||
SET @@session.ROCKSDB_MASTER_SKIP_TX_API = DEFAULT;
|
||||
SELECT @@session.ROCKSDB_MASTER_SKIP_TX_API;
|
||||
@@session.ROCKSDB_MASTER_SKIP_TX_API
|
||||
0
|
||||
"Trying to set variable @@session.ROCKSDB_MASTER_SKIP_TX_API to 0"
|
||||
SET @@session.ROCKSDB_MASTER_SKIP_TX_API = 0;
|
||||
SELECT @@session.ROCKSDB_MASTER_SKIP_TX_API;
|
||||
@@session.ROCKSDB_MASTER_SKIP_TX_API
|
||||
0
|
||||
"Setting the session scope variable back to default"
|
||||
SET @@session.ROCKSDB_MASTER_SKIP_TX_API = DEFAULT;
|
||||
SELECT @@session.ROCKSDB_MASTER_SKIP_TX_API;
|
||||
@@session.ROCKSDB_MASTER_SKIP_TX_API
|
||||
0
|
||||
"Trying to set variable @@session.ROCKSDB_MASTER_SKIP_TX_API to on"
|
||||
SET @@session.ROCKSDB_MASTER_SKIP_TX_API = on;
|
||||
SELECT @@session.ROCKSDB_MASTER_SKIP_TX_API;
|
||||
@@session.ROCKSDB_MASTER_SKIP_TX_API
|
||||
1
|
||||
"Setting the session scope variable back to default"
|
||||
SET @@session.ROCKSDB_MASTER_SKIP_TX_API = DEFAULT;
|
||||
SELECT @@session.ROCKSDB_MASTER_SKIP_TX_API;
|
||||
@@session.ROCKSDB_MASTER_SKIP_TX_API
|
||||
0
|
||||
'# Testing with invalid values in global scope #'
|
||||
"Trying to set variable @@global.ROCKSDB_MASTER_SKIP_TX_API to 'aaa'"
|
||||
SET @@global.ROCKSDB_MASTER_SKIP_TX_API = 'aaa';
|
||||
Got one of the listed errors
|
||||
SELECT @@global.ROCKSDB_MASTER_SKIP_TX_API;
|
||||
@@global.ROCKSDB_MASTER_SKIP_TX_API
|
||||
0
|
||||
"Trying to set variable @@global.ROCKSDB_MASTER_SKIP_TX_API to 'bbb'"
|
||||
SET @@global.ROCKSDB_MASTER_SKIP_TX_API = 'bbb';
|
||||
Got one of the listed errors
|
||||
SELECT @@global.ROCKSDB_MASTER_SKIP_TX_API;
|
||||
@@global.ROCKSDB_MASTER_SKIP_TX_API
|
||||
0
|
||||
SET @@global.ROCKSDB_MASTER_SKIP_TX_API = @start_global_value;
|
||||
SELECT @@global.ROCKSDB_MASTER_SKIP_TX_API;
|
||||
@@global.ROCKSDB_MASTER_SKIP_TX_API
|
||||
0
|
||||
SET @@session.ROCKSDB_MASTER_SKIP_TX_API = @start_session_value;
|
||||
SELECT @@session.ROCKSDB_MASTER_SKIP_TX_API;
|
||||
@@session.ROCKSDB_MASTER_SKIP_TX_API
|
||||
0
|
||||
DROP TABLE valid_values;
|
||||
DROP TABLE invalid_values;
|
@@ -3,12 +3,12 @@ INSERT INTO valid_values VALUES(1);
|
||||
INSERT INTO valid_values VALUES(1024);
|
||||
CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
|
||||
INSERT INTO invalid_values VALUES('\'aaa\'');
|
||||
SET @start_global_value = @@global.ROCKSDB_PERSISTENT_CACHE_SIZE;
|
||||
SET @start_global_value = @@global.ROCKSDB_PERSISTENT_CACHE_SIZE_MB;
|
||||
SELECT @start_global_value;
|
||||
@start_global_value
|
||||
0
|
||||
"Trying to set variable @@global.ROCKSDB_PERSISTENT_CACHE_SIZE to 444. It should fail because it is readonly."
|
||||
SET @@global.ROCKSDB_PERSISTENT_CACHE_SIZE = 444;
|
||||
ERROR HY000: Variable 'rocksdb_persistent_cache_size' is a read only variable
|
||||
"Trying to set variable @@global.ROCKSDB_PERSISTENT_CACHE_SIZE_MB to 444. It should fail because it is readonly."
|
||||
SET @@global.ROCKSDB_PERSISTENT_CACHE_SIZE_MB = 444;
|
||||
ERROR HY000: Variable 'rocksdb_persistent_cache_size_mb' is a read only variable
|
||||
DROP TABLE valid_values;
|
||||
DROP TABLE invalid_values;
|
@@ -6,7 +6,7 @@ INSERT INTO invalid_values VALUES('\'aaa\'');
|
||||
SET @start_global_value = @@global.ROCKSDB_WAL_RECOVERY_MODE;
|
||||
SELECT @start_global_value;
|
||||
@start_global_value
|
||||
2
|
||||
1
|
||||
'# Setting to valid values in global scope#'
|
||||
"Trying to set variable @@global.ROCKSDB_WAL_RECOVERY_MODE to 1"
|
||||
SET @@global.ROCKSDB_WAL_RECOVERY_MODE = 1;
|
||||
@@ -17,7 +17,7 @@ SELECT @@global.ROCKSDB_WAL_RECOVERY_MODE;
|
||||
SET @@global.ROCKSDB_WAL_RECOVERY_MODE = DEFAULT;
|
||||
SELECT @@global.ROCKSDB_WAL_RECOVERY_MODE;
|
||||
@@global.ROCKSDB_WAL_RECOVERY_MODE
|
||||
2
|
||||
1
|
||||
"Trying to set variable @@global.ROCKSDB_WAL_RECOVERY_MODE to 0"
|
||||
SET @@global.ROCKSDB_WAL_RECOVERY_MODE = 0;
|
||||
SELECT @@global.ROCKSDB_WAL_RECOVERY_MODE;
|
||||
@@ -27,7 +27,7 @@ SELECT @@global.ROCKSDB_WAL_RECOVERY_MODE;
|
||||
SET @@global.ROCKSDB_WAL_RECOVERY_MODE = DEFAULT;
|
||||
SELECT @@global.ROCKSDB_WAL_RECOVERY_MODE;
|
||||
@@global.ROCKSDB_WAL_RECOVERY_MODE
|
||||
2
|
||||
1
|
||||
"Trying to set variable @@session.ROCKSDB_WAL_RECOVERY_MODE to 444. It should fail because it is not session."
|
||||
SET @@session.ROCKSDB_WAL_RECOVERY_MODE = 444;
|
||||
ERROR HY000: Variable 'rocksdb_wal_recovery_mode' is a GLOBAL variable and should be set with SET GLOBAL
|
||||
@@ -37,10 +37,10 @@ SET @@global.ROCKSDB_WAL_RECOVERY_MODE = 'aaa';
|
||||
Got one of the listed errors
|
||||
SELECT @@global.ROCKSDB_WAL_RECOVERY_MODE;
|
||||
@@global.ROCKSDB_WAL_RECOVERY_MODE
|
||||
2
|
||||
1
|
||||
SET @@global.ROCKSDB_WAL_RECOVERY_MODE = @start_global_value;
|
||||
SELECT @@global.ROCKSDB_WAL_RECOVERY_MODE;
|
||||
@@global.ROCKSDB_WAL_RECOVERY_MODE
|
||||
2
|
||||
1
|
||||
DROP TABLE valid_values;
|
||||
DROP TABLE invalid_values;
|
||||
|
@@ -1,114 +0,0 @@
|
||||
CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam;
|
||||
INSERT INTO valid_values VALUES(1);
|
||||
INSERT INTO valid_values VALUES(0);
|
||||
INSERT INTO valid_values VALUES('on');
|
||||
INSERT INTO valid_values VALUES('off');
|
||||
CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
|
||||
INSERT INTO invalid_values VALUES('\'aaa\'');
|
||||
SET @start_global_value = @@global.ROCKSDB_WRITE_SYNC;
|
||||
SELECT @start_global_value;
|
||||
@start_global_value
|
||||
0
|
||||
SET @start_session_value = @@session.ROCKSDB_WRITE_SYNC;
|
||||
SELECT @start_session_value;
|
||||
@start_session_value
|
||||
0
|
||||
'# Setting to valid values in global scope#'
|
||||
"Trying to set variable @@global.ROCKSDB_WRITE_SYNC to 1"
|
||||
SET @@global.ROCKSDB_WRITE_SYNC = 1;
|
||||
SELECT @@global.ROCKSDB_WRITE_SYNC;
|
||||
@@global.ROCKSDB_WRITE_SYNC
|
||||
1
|
||||
"Setting the global scope variable back to default"
|
||||
SET @@global.ROCKSDB_WRITE_SYNC = DEFAULT;
|
||||
SELECT @@global.ROCKSDB_WRITE_SYNC;
|
||||
@@global.ROCKSDB_WRITE_SYNC
|
||||
0
|
||||
"Trying to set variable @@global.ROCKSDB_WRITE_SYNC to 0"
|
||||
SET @@global.ROCKSDB_WRITE_SYNC = 0;
|
||||
SELECT @@global.ROCKSDB_WRITE_SYNC;
|
||||
@@global.ROCKSDB_WRITE_SYNC
|
||||
0
|
||||
"Setting the global scope variable back to default"
|
||||
SET @@global.ROCKSDB_WRITE_SYNC = DEFAULT;
|
||||
SELECT @@global.ROCKSDB_WRITE_SYNC;
|
||||
@@global.ROCKSDB_WRITE_SYNC
|
||||
0
|
||||
"Trying to set variable @@global.ROCKSDB_WRITE_SYNC to on"
|
||||
SET @@global.ROCKSDB_WRITE_SYNC = on;
|
||||
SELECT @@global.ROCKSDB_WRITE_SYNC;
|
||||
@@global.ROCKSDB_WRITE_SYNC
|
||||
1
|
||||
"Setting the global scope variable back to default"
|
||||
SET @@global.ROCKSDB_WRITE_SYNC = DEFAULT;
|
||||
SELECT @@global.ROCKSDB_WRITE_SYNC;
|
||||
@@global.ROCKSDB_WRITE_SYNC
|
||||
0
|
||||
"Trying to set variable @@global.ROCKSDB_WRITE_SYNC to off"
|
||||
SET @@global.ROCKSDB_WRITE_SYNC = off;
|
||||
SELECT @@global.ROCKSDB_WRITE_SYNC;
|
||||
@@global.ROCKSDB_WRITE_SYNC
|
||||
0
|
||||
"Setting the global scope variable back to default"
|
||||
SET @@global.ROCKSDB_WRITE_SYNC = DEFAULT;
|
||||
SELECT @@global.ROCKSDB_WRITE_SYNC;
|
||||
@@global.ROCKSDB_WRITE_SYNC
|
||||
0
|
||||
'# Setting to valid values in session scope#'
|
||||
"Trying to set variable @@session.ROCKSDB_WRITE_SYNC to 1"
|
||||
SET @@session.ROCKSDB_WRITE_SYNC = 1;
|
||||
SELECT @@session.ROCKSDB_WRITE_SYNC;
|
||||
@@session.ROCKSDB_WRITE_SYNC
|
||||
1
|
||||
"Setting the session scope variable back to default"
|
||||
SET @@session.ROCKSDB_WRITE_SYNC = DEFAULT;
|
||||
SELECT @@session.ROCKSDB_WRITE_SYNC;
|
||||
@@session.ROCKSDB_WRITE_SYNC
|
||||
0
|
||||
"Trying to set variable @@session.ROCKSDB_WRITE_SYNC to 0"
|
||||
SET @@session.ROCKSDB_WRITE_SYNC = 0;
|
||||
SELECT @@session.ROCKSDB_WRITE_SYNC;
|
||||
@@session.ROCKSDB_WRITE_SYNC
|
||||
0
|
||||
"Setting the session scope variable back to default"
|
||||
SET @@session.ROCKSDB_WRITE_SYNC = DEFAULT;
|
||||
SELECT @@session.ROCKSDB_WRITE_SYNC;
|
||||
@@session.ROCKSDB_WRITE_SYNC
|
||||
0
|
||||
"Trying to set variable @@session.ROCKSDB_WRITE_SYNC to on"
|
||||
SET @@session.ROCKSDB_WRITE_SYNC = on;
|
||||
SELECT @@session.ROCKSDB_WRITE_SYNC;
|
||||
@@session.ROCKSDB_WRITE_SYNC
|
||||
1
|
||||
"Setting the session scope variable back to default"
|
||||
SET @@session.ROCKSDB_WRITE_SYNC = DEFAULT;
|
||||
SELECT @@session.ROCKSDB_WRITE_SYNC;
|
||||
@@session.ROCKSDB_WRITE_SYNC
|
||||
0
|
||||
"Trying to set variable @@session.ROCKSDB_WRITE_SYNC to off"
|
||||
SET @@session.ROCKSDB_WRITE_SYNC = off;
|
||||
SELECT @@session.ROCKSDB_WRITE_SYNC;
|
||||
@@session.ROCKSDB_WRITE_SYNC
|
||||
0
|
||||
"Setting the session scope variable back to default"
|
||||
SET @@session.ROCKSDB_WRITE_SYNC = DEFAULT;
|
||||
SELECT @@session.ROCKSDB_WRITE_SYNC;
|
||||
@@session.ROCKSDB_WRITE_SYNC
|
||||
0
|
||||
'# Testing with invalid values in global scope #'
|
||||
"Trying to set variable @@global.ROCKSDB_WRITE_SYNC to 'aaa'"
|
||||
SET @@global.ROCKSDB_WRITE_SYNC = 'aaa';
|
||||
Got one of the listed errors
|
||||
SELECT @@global.ROCKSDB_WRITE_SYNC;
|
||||
@@global.ROCKSDB_WRITE_SYNC
|
||||
0
|
||||
SET @@global.ROCKSDB_WRITE_SYNC = @start_global_value;
|
||||
SELECT @@global.ROCKSDB_WRITE_SYNC;
|
||||
@@global.ROCKSDB_WRITE_SYNC
|
||||
0
|
||||
SET @@session.ROCKSDB_WRITE_SYNC = @start_session_value;
|
||||
SELECT @@session.ROCKSDB_WRITE_SYNC;
|
||||
@@session.ROCKSDB_WRITE_SYNC
|
||||
0
|
||||
DROP TABLE valid_values;
|
||||
DROP TABLE invalid_values;
|
@@ -0,0 +1,18 @@
--source include/have_rocksdb.inc

CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam;
INSERT INTO valid_values VALUES(1);
INSERT INTO valid_values VALUES(0);
INSERT INTO valid_values VALUES('on');

CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
INSERT INTO invalid_values VALUES('\'aaa\'');
INSERT INTO invalid_values VALUES('\'bbb\'');

--let $sys_var=ROCKSDB_BLIND_DELETE_PRIMARY_KEY
--let $read_only=0
--let $session=1
--source suite/sys_vars/inc/rocksdb_sys_var.inc

DROP TABLE valid_values;
DROP TABLE invalid_values;
@@ -1,3 +1,6 @@

call mtr.add_suppression(" Column family '[a-z]*' not found.");

--source include/have_rocksdb.inc

CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam;
@@ -10,6 +13,7 @@ CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
--let $read_only=0
--let $session=0
--let $sticky=1

--source suite/sys_vars/inc/rocksdb_sys_var.inc

DROP TABLE valid_values;
@@ -0,0 +1,22 @@
--source include/have_rocksdb.inc

CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam;
INSERT INTO valid_values VALUES(100);
INSERT INTO valid_values VALUES(1);
INSERT INTO valid_values VALUES(0);

CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
INSERT INTO invalid_values VALUES('\'aaa\'');
INSERT INTO invalid_values VALUES('\'bbb\'');
INSERT INTO invalid_values VALUES('\'-1\'');
INSERT INTO invalid_values VALUES('\'101\'');
INSERT INTO invalid_values VALUES('\'484436\'');

--let $sys_var=ROCKSDB_DELAYED_WRITE_RATE
--let $read_only=0
--let $session=0
--source suite/sys_vars/inc/rocksdb_sys_var.inc

DROP TABLE valid_values;
DROP TABLE invalid_values;
@@ -1,6 +0,0 @@
--source include/have_rocksdb.inc

--let $sys_var=ROCKSDB_DISABLEDATASYNC
--let $read_only=1
--let $session=0
--source suite/sys_vars/inc/rocksdb_sys_var.inc
@@ -0,0 +1,18 @@
--source include/have_rocksdb.inc

CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam;
INSERT INTO valid_values VALUES(2);
INSERT INTO valid_values VALUES(1);
INSERT INTO valid_values VALUES(0);

CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
INSERT INTO invalid_values VALUES('\'aaa\'');

--let $sys_var=ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT
--let $read_only=0
--let $session=1
--source suite/sys_vars/inc/rocksdb_sys_var.inc

DROP TABLE valid_values;
DROP TABLE invalid_values;
@@ -37,8 +37,10 @@ INSERT INTO t1 (b) VALUES (3);
--sorted_result
SELECT * FROM t1;

--replace_column 5 # 6 #
SHOW TABLE STATUS LIKE 't1';
ANALYZE TABLE t1;
--replace_column 5 # 6 #
SHOW TABLE STATUS LIKE 't1';

DROP TABLE t1;
@@ -0,0 +1,23 @@
--source include/have_rocksdb.inc
--disable_warnings
DROP TABLE IF EXISTS t;
--enable_warnings

CREATE TABLE t (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb;
INSERT INTO t (a,b) VALUES (1,'bar'),(2,'foo');

SET @ORIG_PAUSE_BACKGROUND_WORK = @@rocksdb_force_compute_memtable_stats;
set global rocksdb_force_flush_memtable_now = true;

INSERT INTO t (a,b) VALUES (3,'dead'),(4,'beef'),(5,'a'),(6,'bbb'),(7,'c'),(8,'d');

set global rocksdb_force_compute_memtable_stats=0;
SELECT TABLE_ROWS INTO @ROWS_EXCLUDE_MEMTABLE FROM information_schema.TABLES WHERE table_name = 't';

set global rocksdb_force_compute_memtable_stats=1;
SELECT TABLE_ROWS INTO @ROWS_INCLUDE_MEMTABLE FROM information_schema.TABLES WHERE table_name = 't';

select case when @ROWS_INCLUDE_MEMTABLE-@ROWS_EXCLUDE_MEMTABLE > 0 then 'true' else 'false' end;

DROP TABLE t;
set global rocksdb_force_compute_memtable_stats = @ORIG_PAUSE_BACKGROUND_WORK;
@@ -4,12 +4,12 @@ CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam;
INSERT INTO valid_values VALUES(1);
INSERT INTO valid_values VALUES(0);
INSERT INTO valid_values VALUES('on');
INSERT INTO valid_values VALUES('off');

CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
INSERT INTO invalid_values VALUES('\'aaa\'');
INSERT INTO invalid_values VALUES('\'bbb\'');

--let $sys_var=ROCKSDB_WRITE_SYNC
--let $sys_var=ROCKSDB_MASTER_SKIP_TX_API
--let $read_only=0
--let $session=1
--source suite/sys_vars/inc/rocksdb_sys_var.inc
@@ -7,7 +7,7 @@ INSERT INTO valid_values VALUES(1024);
CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
INSERT INTO invalid_values VALUES('\'aaa\'');

--let $sys_var=ROCKSDB_PERSISTENT_CACHE_SIZE
--let $sys_var=ROCKSDB_PERSISTENT_CACHE_SIZE_MB
--let $read_only=1
--let $session=0
--source suite/sys_vars/inc/rocksdb_sys_var.inc
@@ -483,9 +483,16 @@ void Rdb_index_stats::merge(const Rdb_index_stats &s, const bool &increment,
m_entry_single_deletes += s.m_entry_single_deletes;
m_entry_merges += s.m_entry_merges;
m_entry_others += s.m_entry_others;
if (s.m_distinct_keys_per_prefix.size() > 0) {
for (i = 0; i < s.m_distinct_keys_per_prefix.size(); i++) {
m_distinct_keys_per_prefix[i] += s.m_distinct_keys_per_prefix[i];
}
} else {
for (i = 0; i < m_distinct_keys_per_prefix.size(); i++) {
m_distinct_keys_per_prefix[i] +=
s.m_rows >> (m_distinct_keys_per_prefix.size() - i - 1);
}
}
} else {
m_rows -= s.m_rows;
m_data_size -= s.m_data_size;
@@ -495,9 +502,16 @@ void Rdb_index_stats::merge(const Rdb_index_stats &s, const bool &increment,
m_entry_single_deletes -= s.m_entry_single_deletes;
m_entry_merges -= s.m_entry_merges;
m_entry_others -= s.m_entry_others;
if (s.m_distinct_keys_per_prefix.size() > 0) {
for (i = 0; i < s.m_distinct_keys_per_prefix.size(); i++) {
m_distinct_keys_per_prefix[i] -= s.m_distinct_keys_per_prefix[i];
}
} else {
for (i = 0; i < m_distinct_keys_per_prefix.size(); i++) {
m_distinct_keys_per_prefix[i] -=
s.m_rows >> (m_distinct_keys_per_prefix.size() - i - 1);
}
}
}
}
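
Editor's note: the else-branches added above apply when the incoming Rdb_index_stats object carries no per-prefix distinct-key counts; each prefix is then estimated from the row count by assuming every additional key column roughly doubles the number of distinct prefixes. A minimal standalone sketch of that estimation rule, with illustrative names that are not part of the patch:

#include <cstdint>
#include <vector>

std::vector<int64_t> estimate_distinct_keys_per_prefix(int64_t rows,
                                                       size_t n_prefixes) {
  std::vector<int64_t> est(n_prefixes);
  for (size_t i = 0; i < n_prefixes; i++) {
    // The shortest prefix gets rows >> (n - 1); the full key gets rows >> 0,
    // i.e. every row is assumed distinct on the complete key.
    est[i] = rows >> (n_prefixes - i - 1);
  }
  return est;
}
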
@@ -24,6 +24,7 @@
/* MyRocks header files */
#include "./ha_rocksdb.h"
#include "./ha_rocksdb_proto.h"
#include "./rdb_psi.h"

namespace myrocks {

@@ -31,20 +32,13 @@ namespace myrocks {
bool Rdb_cf_manager::is_cf_name_reverse(const char *const name) {
/* nullptr means the default CF is used.. (TODO: can the default CF be
* reverse?) */
if (name && !strncmp(name, "rev:", 4))
return true;
else
return false;
return (name && !strncmp(name, "rev:", 4));
}

#ifdef HAVE_PSI_INTERFACE
static PSI_mutex_key ex_key_cfm;
#endif

void Rdb_cf_manager::init(
Rdb_cf_options *const cf_options,
std::vector<rocksdb::ColumnFamilyHandle *> *const handles) {
mysql_mutex_init(ex_key_cfm, &m_mutex, MY_MUTEX_INIT_FAST);
mysql_mutex_init(rdb_cfm_mutex_key, &m_mutex, MY_MUTEX_INIT_FAST);

DBUG_ASSERT(cf_options != nullptr);
DBUG_ASSERT(handles != nullptr);
@@ -96,14 +90,20 @@ Rdb_cf_manager::get_or_create_cf(rocksdb::DB *const rdb, const char *cf_name,
|
||||
DBUG_ASSERT(rdb != nullptr);
|
||||
DBUG_ASSERT(is_automatic != nullptr);
|
||||
|
||||
rocksdb::ColumnFamilyHandle *cf_handle;
|
||||
rocksdb::ColumnFamilyHandle *cf_handle = nullptr;
|
||||
|
||||
RDB_MUTEX_LOCK_CHECK(m_mutex);
|
||||
|
||||
mysql_mutex_lock(&m_mutex);
|
||||
*is_automatic = false;
|
||||
if (cf_name == nullptr)
|
||||
|
||||
if (cf_name == nullptr || *cf_name == '\0') {
|
||||
cf_name = DEFAULT_CF_NAME;
|
||||
}
|
||||
|
||||
DBUG_ASSERT(cf_name != nullptr);
|
||||
|
||||
std::string per_index_name;
|
||||
|
||||
if (!strcmp(cf_name, PER_INDEX_CF_NAME)) {
|
||||
get_per_index_cf_name(db_table_name, index_name, &per_index_name);
|
||||
cf_name = per_index_name.c_str();
|
||||
@@ -111,15 +111,17 @@ Rdb_cf_manager::get_or_create_cf(rocksdb::DB *const rdb, const char *cf_name,
|
||||
}
|
||||
|
||||
const auto it = m_cf_name_map.find(cf_name);
|
||||
if (it != m_cf_name_map.end())
|
||||
|
||||
if (it != m_cf_name_map.end()) {
|
||||
cf_handle = it->second;
|
||||
else {
|
||||
} else {
|
||||
/* Create a Column Family. */
|
||||
const std::string cf_name_str(cf_name);
|
||||
rocksdb::ColumnFamilyOptions opts;
|
||||
m_cf_options->get_cf_options(cf_name_str, &opts);
|
||||
|
||||
sql_print_information("RocksDB: creating column family %s",
|
||||
// NO_LINT_DEBUG
|
||||
sql_print_information("RocksDB: creating a column family %s",
|
||||
cf_name_str.c_str());
|
||||
sql_print_information(" write_buffer_size=%ld", opts.write_buffer_size);
|
||||
sql_print_information(" target_file_size_base=%" PRIu64,
|
||||
@@ -127,6 +129,7 @@ Rdb_cf_manager::get_or_create_cf(rocksdb::DB *const rdb, const char *cf_name,
|
||||
|
||||
const rocksdb::Status s =
|
||||
rdb->CreateColumnFamily(opts, cf_name_str, &cf_handle);
|
||||
|
||||
if (s.ok()) {
|
||||
m_cf_name_map[cf_handle->GetName()] = cf_handle;
|
||||
m_cf_id_map[cf_handle->GetID()] = cf_handle;
|
||||
@@ -134,7 +137,8 @@ Rdb_cf_manager::get_or_create_cf(rocksdb::DB *const rdb, const char *cf_name,
|
||||
cf_handle = nullptr;
|
||||
}
|
||||
}
|
||||
mysql_mutex_unlock(&m_mutex);
|
||||
|
||||
RDB_MUTEX_UNLOCK_CHECK(m_mutex);
|
||||
|
||||
return cf_handle;
|
||||
}
|
||||
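
Editor's note: these hunks consistently replace bare mysql_mutex_lock()/mysql_mutex_unlock() calls with RDB_MUTEX_LOCK_CHECK()/RDB_MUTEX_UNLOCK_CHECK(). The macros themselves are defined elsewhere in MyRocks and are not shown in this diff; one plausible minimal shape, for illustration only, is a wrapper that checks the return code of the underlying call:

// Sketch only; the real macro names and bodies live outside this diff.
#define RDB_MUTEX_LOCK_CHECK_SKETCH(mutex)                         \
  do {                                                             \
    const int rc = mysql_mutex_lock(&(mutex));                     \
    DBUG_ASSERT(rc == 0); /* fail fast in debug builds on error */ \
    (void)rc;                                                      \
  } while (0)
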
@@ -160,13 +164,18 @@ Rdb_cf_manager::get_cf(const char *cf_name, const std::string &db_table_name,
|
||||
rocksdb::ColumnFamilyHandle *cf_handle;
|
||||
|
||||
*is_automatic = false;
|
||||
mysql_mutex_lock(&m_mutex);
|
||||
if (cf_name == nullptr)
|
||||
|
||||
RDB_MUTEX_LOCK_CHECK(m_mutex);
|
||||
|
||||
if (cf_name == nullptr) {
|
||||
cf_name = DEFAULT_CF_NAME;
|
||||
}
|
||||
|
||||
std::string per_index_name;
|
||||
|
||||
if (!strcmp(cf_name, PER_INDEX_CF_NAME)) {
|
||||
get_per_index_cf_name(db_table_name, index_name, &per_index_name);
|
||||
DBUG_ASSERT(!per_index_name.empty());
|
||||
cf_name = per_index_name.c_str();
|
||||
*is_automatic = true;
|
||||
}
|
||||
@@ -174,7 +183,12 @@ Rdb_cf_manager::get_cf(const char *cf_name, const std::string &db_table_name,
|
||||
const auto it = m_cf_name_map.find(cf_name);
|
||||
cf_handle = (it != m_cf_name_map.end()) ? it->second : nullptr;
|
||||
|
||||
mysql_mutex_unlock(&m_mutex);
|
||||
if (!cf_handle) {
|
||||
// NO_LINT_DEBUG
|
||||
sql_print_warning("Column family '%s' not found.", cf_name);
|
||||
}
|
||||
|
||||
RDB_MUTEX_UNLOCK_CHECK(m_mutex);
|
||||
|
||||
return cf_handle;
|
||||
}
|
||||
@@ -182,11 +196,11 @@ Rdb_cf_manager::get_cf(const char *cf_name, const std::string &db_table_name,
|
||||
rocksdb::ColumnFamilyHandle *Rdb_cf_manager::get_cf(const uint32_t &id) const {
|
||||
rocksdb::ColumnFamilyHandle *cf_handle = nullptr;
|
||||
|
||||
mysql_mutex_lock(&m_mutex);
|
||||
RDB_MUTEX_LOCK_CHECK(m_mutex);
|
||||
const auto it = m_cf_id_map.find(id);
|
||||
if (it != m_cf_id_map.end())
|
||||
cf_handle = it->second;
|
||||
mysql_mutex_unlock(&m_mutex);
|
||||
RDB_MUTEX_UNLOCK_CHECK(m_mutex);
|
||||
|
||||
return cf_handle;
|
||||
}
|
||||
@@ -194,11 +208,12 @@ rocksdb::ColumnFamilyHandle *Rdb_cf_manager::get_cf(const uint32_t &id) const {
|
||||
std::vector<std::string> Rdb_cf_manager::get_cf_names(void) const {
|
||||
std::vector<std::string> names;
|
||||
|
||||
mysql_mutex_lock(&m_mutex);
|
||||
RDB_MUTEX_LOCK_CHECK(m_mutex);
|
||||
for (auto it : m_cf_name_map) {
|
||||
names.push_back(it.first);
|
||||
}
|
||||
mysql_mutex_unlock(&m_mutex);
|
||||
RDB_MUTEX_UNLOCK_CHECK(m_mutex);
|
||||
|
||||
return names;
|
||||
}
|
||||
|
||||
@@ -206,11 +221,13 @@ std::vector<rocksdb::ColumnFamilyHandle *>
|
||||
Rdb_cf_manager::get_all_cf(void) const {
|
||||
std::vector<rocksdb::ColumnFamilyHandle *> list;
|
||||
|
||||
mysql_mutex_lock(&m_mutex);
|
||||
RDB_MUTEX_LOCK_CHECK(m_mutex);
|
||||
|
||||
for (auto it : m_cf_id_map) {
|
||||
list.push_back(it.second);
|
||||
}
|
||||
mysql_mutex_unlock(&m_mutex);
|
||||
|
||||
RDB_MUTEX_UNLOCK_CHECK(m_mutex);
|
||||
|
||||
return list;
|
||||
}
|
||||
|
@@ -55,12 +55,13 @@ Rdb_key_def::Rdb_key_def(uint indexnr_arg, uint keyno_arg,
|
||||
rocksdb::ColumnFamilyHandle *cf_handle_arg,
|
||||
uint16_t index_dict_version_arg, uchar index_type_arg,
|
||||
uint16_t kv_format_version_arg, bool is_reverse_cf_arg,
|
||||
bool is_auto_cf_arg, const char *_name,
|
||||
Rdb_index_stats _stats)
|
||||
bool is_auto_cf_arg, bool is_per_partition_cf_arg,
|
||||
const char *_name, Rdb_index_stats _stats)
|
||||
: m_index_number(indexnr_arg), m_cf_handle(cf_handle_arg),
|
||||
m_index_dict_version(index_dict_version_arg),
|
||||
m_index_type(index_type_arg), m_kv_format_version(kv_format_version_arg),
|
||||
m_is_reverse_cf(is_reverse_cf_arg), m_is_auto_cf(is_auto_cf_arg),
|
||||
m_is_per_partition_cf(is_per_partition_cf_arg),
|
||||
m_name(_name), m_stats(_stats), m_pk_part_no(nullptr),
|
||||
m_pack_info(nullptr), m_keyno(keyno_arg), m_key_parts(0),
|
||||
m_prefix_extractor(nullptr), m_maxlength(0) // means 'not initialized'
|
||||
@@ -73,6 +74,7 @@ Rdb_key_def::Rdb_key_def(uint indexnr_arg, uint keyno_arg,
|
||||
Rdb_key_def::Rdb_key_def(const Rdb_key_def &k)
|
||||
: m_index_number(k.m_index_number), m_cf_handle(k.m_cf_handle),
|
||||
m_is_reverse_cf(k.m_is_reverse_cf), m_is_auto_cf(k.m_is_auto_cf),
|
||||
m_is_per_partition_cf(k.m_is_per_partition_cf),
|
||||
m_name(k.m_name), m_stats(k.m_stats), m_pk_part_no(k.m_pk_part_no),
|
||||
m_pack_info(k.m_pack_info), m_keyno(k.m_keyno),
|
||||
m_key_parts(k.m_key_parts), m_prefix_extractor(k.m_prefix_extractor),
|
||||
@@ -116,9 +118,9 @@ void Rdb_key_def::setup(const TABLE *const tbl,
|
||||
const bool hidden_pk_exists = table_has_hidden_pk(tbl);
|
||||
const bool secondary_key = (m_index_type == INDEX_TYPE_SECONDARY);
|
||||
if (!m_maxlength) {
|
||||
mysql_mutex_lock(&m_mutex);
|
||||
RDB_MUTEX_LOCK_CHECK(m_mutex);
|
||||
if (m_maxlength != 0) {
|
||||
mysql_mutex_unlock(&m_mutex);
|
||||
RDB_MUTEX_UNLOCK_CHECK(m_mutex);
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -280,7 +282,7 @@ void Rdb_key_def::setup(const TABLE *const tbl,
|
||||
*/
|
||||
m_maxlength = max_len;
|
||||
|
||||
mysql_mutex_unlock(&m_mutex);
|
||||
RDB_MUTEX_UNLOCK_CHECK(m_mutex);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -515,6 +517,50 @@ int Rdb_key_def::successor(uchar *const packed_tuple, const uint &len) {
|
||||
return changed;
|
||||
}
|
||||
|
||||
uchar *Rdb_key_def::pack_field(
|
||||
Field *const field,
|
||||
Rdb_field_packing *pack_info,
|
||||
uchar * tuple,
|
||||
uchar *const packed_tuple,
|
||||
uchar *const pack_buffer,
|
||||
Rdb_string_writer *const unpack_info,
|
||||
uint *const n_null_fields) const
|
||||
{
|
||||
if (field->real_maybe_null()) {
|
||||
DBUG_ASSERT(is_storage_available(tuple - packed_tuple, 1));
|
||||
if (field->is_real_null()) {
|
||||
/* NULL value. store '\0' so that it sorts before non-NULL values */
|
||||
*tuple++ = 0;
|
||||
/* That's it, don't store anything else */
|
||||
if (n_null_fields)
|
||||
(*n_null_fields)++;
|
||||
return tuple;
|
||||
} else {
|
||||
/* Not a NULL value. Store '1' */
|
||||
*tuple++ = 1;
|
||||
}
|
||||
}
|
||||
|
||||
const bool create_unpack_info =
|
||||
(unpack_info && // we were requested to generate unpack_info
|
||||
pack_info->uses_unpack_info()); // and this keypart uses it
|
||||
Rdb_pack_field_context pack_ctx(unpack_info);
|
||||
|
||||
// Set the offset for methods which do not take an offset as an argument
|
||||
DBUG_ASSERT(is_storage_available(tuple - packed_tuple,
|
||||
pack_info->m_max_image_len));
|
||||
|
||||
pack_info->m_pack_func(pack_info, field, pack_buffer, &tuple, &pack_ctx);
|
||||
|
||||
/* Make "unpack info" to be stored in the value */
|
||||
if (create_unpack_info) {
|
||||
pack_info->m_make_unpack_info_func(pack_info->m_charset_codec, field,
|
||||
&pack_ctx);
|
||||
}
|
||||
|
||||
return tuple;
|
||||
}
|
||||
|
||||
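
Editor's note: pack_field() above factors the per-keypart work (the NULL indicator byte, the keypart's pack function, and optional unpack info) out of pack_record(). The refactored pack_record() hunk further down reduces its per-field body to a single call, roughly as in this illustrative fragment using the names from that hunk (the loop header here is a sketch, not copied from the patch):

for (uint i = 0; i < m_key_parts; i++) {
  Field *const field = m_pack_info[i].get_field_in_table(tbl);
  DBUG_ASSERT(field != nullptr);
  // pack_field() writes the NULL byte, runs the pack function and, when the
  // keypart needs it, appends unpack info; it returns the advanced position.
  tuple = pack_field(field, &m_pack_info[i], tuple, packed_tuple, pack_buffer,
                     unpack_info, n_null_fields);
}
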
/**
|
||||
Get index columns from the record and pack them into mem-comparable form.
|
||||
|
||||
@@ -595,45 +641,21 @@ uint Rdb_key_def::pack_record(const TABLE *const tbl, uchar *const pack_buffer,
|
||||
Field *const field = m_pack_info[i].get_field_in_table(tbl);
|
||||
DBUG_ASSERT(field != nullptr);
|
||||
|
||||
// Old Field methods expected the record pointer to be at tbl->record[0].
|
||||
// The quick and easy way to fix this was to pass along the offset
|
||||
// for the pointer.
|
||||
const my_ptrdiff_t ptr_diff = record - tbl->record[0];
|
||||
uint field_offset = field->ptr - tbl->record[0];
|
||||
uint null_offset = field->null_offset(tbl->record[0]);
|
||||
bool maybe_null = field->real_maybe_null();
|
||||
field->move_field(const_cast<uchar*>(record) + field_offset,
|
||||
maybe_null ? const_cast<uchar*>(record) + null_offset : nullptr,
|
||||
field->null_bit);
|
||||
// WARNING! Don't return without restoring field->ptr and field->null_ptr
|
||||
|
||||
if (field->real_maybe_null()) {
|
||||
DBUG_ASSERT(is_storage_available(tuple - packed_tuple, 1));
|
||||
if (field->is_real_null(ptr_diff)) {
|
||||
/* NULL value. store '\0' so that it sorts before non-NULL values */
|
||||
*tuple++ = 0;
|
||||
/* That's it, don't store anything else */
|
||||
if (n_null_fields)
|
||||
(*n_null_fields)++;
|
||||
continue;
|
||||
} else {
|
||||
/* Not a NULL value. Store '1' */
|
||||
*tuple++ = 1;
|
||||
}
|
||||
}
|
||||
tuple = pack_field(field, &m_pack_info[i], tuple, packed_tuple, pack_buffer,
|
||||
unpack_info, n_null_fields);
|
||||
|
||||
const bool create_unpack_info =
|
||||
(unpack_info && // we were requested to generate unpack_info
|
||||
m_pack_info[i].uses_unpack_info()); // and this keypart uses it
|
||||
Rdb_pack_field_context pack_ctx(unpack_info);
|
||||
|
||||
// Set the offset for methods which do not take an offset as an argument
|
||||
DBUG_ASSERT(is_storage_available(tuple - packed_tuple,
|
||||
m_pack_info[i].m_max_image_len));
|
||||
field->move_field_offset(ptr_diff);
|
||||
|
||||
m_pack_info[i].m_pack_func(&m_pack_info[i], field, pack_buffer, &tuple,
|
||||
&pack_ctx);
|
||||
|
||||
/* Make "unpack info" to be stored in the value */
|
||||
if (create_unpack_info) {
|
||||
m_pack_info[i].m_make_unpack_info_func(m_pack_info[i].m_charset_codec,
|
||||
field, &pack_ctx);
|
||||
}
|
||||
field->move_field_offset(-ptr_diff);
|
||||
// Restore field->ptr and field->null_ptr
|
||||
field->move_field(tbl->record[0] + field_offset,
|
||||
maybe_null ? tbl->record[0] + null_offset : nullptr,
|
||||
field->null_bit);
|
||||
}
|
||||
|
||||
if (unpack_info) {
|
||||
@@ -824,6 +846,35 @@ size_t Rdb_key_def::key_length(const TABLE *const table,
|
||||
return key.size() - reader.remaining_bytes();
|
||||
}
|
||||
|
||||
int Rdb_key_def::unpack_field(
|
||||
Rdb_field_packing *const fpi,
|
||||
Field *const field,
|
||||
Rdb_string_reader* reader,
|
||||
const uchar *const default_value,
|
||||
Rdb_string_reader* unp_reader) const
|
||||
{
|
||||
if (fpi->m_maybe_null) {
|
||||
const char *nullp;
|
||||
if (!(nullp = reader->read(1))) {
|
||||
return HA_EXIT_FAILURE;
|
||||
}
|
||||
|
||||
if (*nullp == 0) {
|
||||
/* Set the NULL-bit of this field */
|
||||
field->set_null();
|
||||
/* Also set the field to its default value */
|
||||
memcpy(field->ptr, default_value, field->pack_length());
|
||||
return HA_EXIT_SUCCESS;
|
||||
} else if (*nullp == 1) {
|
||||
field->set_notnull();
|
||||
} else {
|
||||
return HA_EXIT_FAILURE;
|
||||
}
|
||||
}
|
||||
|
||||
return fpi->m_unpack_func(fpi, field, field->ptr, reader, unp_reader);
|
||||
}
|
||||
|
||||
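
Editor's note: a self-contained sketch of the NULL-indicator convention that pack_field()/unpack_field() implement for nullable keyparts. A nullable column contributes one leading byte to the mem-comparable key: 0x00 means NULL (nothing else is stored, so NULLs sort first) and 0x01 means a non-NULL value whose packed image follows. The function below is illustrative only and not MyRocks code:

#include <cstdint>
#include <vector>

static void append_nullable_u32(std::vector<uint8_t> *out, const uint32_t *val) {
  if (val == nullptr) {
    out->push_back(0x00);  // NULL sorts before every non-NULL value
    return;
  }
  out->push_back(0x01);    // marker: a packed value follows
  // Store the value big-endian so memcmp() ordering matches numeric ordering.
  for (int shift = 24; shift >= 0; shift -= 8)
    out->push_back(static_cast<uint8_t>(*val >> shift));
}
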
/*
|
||||
Take mem-comparable form and unpack_info and unpack it to Table->record
|
||||
|
||||
@@ -850,11 +901,6 @@ int Rdb_key_def::unpack_record(TABLE *const table, uchar *const buf,
|
||||
// ha_rocksdb::convert_record_from_storage_format instead.
|
||||
DBUG_ASSERT_IMP(!secondary_key, !verify_row_debug_checksums);
|
||||
|
||||
// Old Field methods expected the record pointer to be at tbl->record[0].
|
||||
// The quick and easy way to fix this was to pass along the offset
|
||||
// for the pointer.
|
||||
const my_ptrdiff_t ptr_diff = buf - table->record[0];
|
||||
|
||||
// Skip the index number
|
||||
if ((!reader.read(INDEX_NUMBER_SIZE))) {
|
||||
return HA_EXIT_FAILURE;
|
||||
@@ -891,35 +937,31 @@ int Rdb_key_def::unpack_record(TABLE *const table, uchar *const buf,
|
||||
if (fpi->m_unpack_func) {
|
||||
/* It is possible to unpack this column. Do it. */
|
||||
|
||||
if (fpi->m_maybe_null) {
|
||||
const char *nullp;
|
||||
if (!(nullp = reader.read(1)))
|
||||
return HA_EXIT_FAILURE;
|
||||
if (*nullp == 0) {
|
||||
/* Set the NULL-bit of this field */
|
||||
field->set_null(ptr_diff);
|
||||
/* Also set the field to its default value */
|
||||
uint field_offset = field->ptr - table->record[0];
|
||||
memcpy(buf + field_offset, table->s->default_values + field_offset,
|
||||
field->pack_length());
|
||||
continue;
|
||||
} else if (*nullp == 1)
|
||||
field->set_notnull(ptr_diff);
|
||||
else
|
||||
return HA_EXIT_FAILURE;
|
||||
}
|
||||
uint null_offset = field->null_offset();
|
||||
bool maybe_null = field->real_maybe_null();
|
||||
field->move_field(buf + field_offset,
|
||||
maybe_null ? buf + null_offset : nullptr,
|
||||
field->null_bit);
|
||||
// WARNING! Don't return without restoring field->ptr and field->null_ptr
|
||||
|
||||
// If we need unpack info, but there is none, tell the unpack function
|
||||
// this by passing unp_reader as nullptr. If we never read unpack_info
|
||||
// during unpacking anyway, then there won't be an error.
|
||||
const bool maybe_missing_unpack =
|
||||
!has_unpack_info && fpi->uses_unpack_info();
|
||||
const int res =
|
||||
fpi->m_unpack_func(fpi, field, field->ptr + ptr_diff, &reader,
|
||||
int res = unpack_field(fpi, field, &reader,
|
||||
table->s->default_values + field_offset,
|
||||
maybe_missing_unpack ? nullptr : &unp_reader);
|
||||
|
||||
if (res)
|
||||
// Restore field->ptr and field->null_ptr
|
||||
field->move_field(table->record[0] + field_offset,
|
||||
maybe_null ? table->record[0] + null_offset : nullptr,
|
||||
field->null_bit);
|
||||
|
||||
if (res) {
|
||||
return res;
|
||||
}
|
||||
} else {
|
||||
/* It is impossible to unpack the column. Skip it. */
|
||||
if (fpi->m_maybe_null) {
|
||||
@@ -2141,7 +2183,7 @@ static void rdb_get_mem_comparable_space(const CHARSET_INFO *const cs,
|
||||
size_t *const mb_len) {
|
||||
DBUG_ASSERT(cs->number < MY_ALL_CHARSETS_SIZE);
|
||||
if (!rdb_mem_comparable_space[cs->number].get()) {
|
||||
mysql_mutex_lock(&rdb_mem_cmp_space_mutex);
|
||||
RDB_MUTEX_LOCK_CHECK(rdb_mem_cmp_space_mutex);
|
||||
if (!rdb_mem_comparable_space[cs->number].get()) {
|
||||
// Upper bound of how many bytes can be occupied by multi-byte form of a
|
||||
// character in any charset.
|
||||
@@ -2167,7 +2209,7 @@ static void rdb_get_mem_comparable_space(const CHARSET_INFO *const cs,
|
||||
}
|
||||
rdb_mem_comparable_space[cs->number].reset(info);
|
||||
}
|
||||
mysql_mutex_unlock(&rdb_mem_cmp_space_mutex);
|
||||
RDB_MUTEX_UNLOCK_CHECK(rdb_mem_cmp_space_mutex);
|
||||
}
|
||||
|
||||
*xfrm = &rdb_mem_comparable_space[cs->number]->spaces_xfrm;
|
||||
@@ -2191,7 +2233,8 @@ rdb_init_collation_mapping(const my_core::CHARSET_INFO *const cs) {
|
||||
const Rdb_collation_codec *codec = rdb_collation_data[cs->number];
|
||||
|
||||
if (codec == nullptr && rdb_is_collation_supported(cs)) {
|
||||
mysql_mutex_lock(&rdb_collation_data_mutex);
|
||||
RDB_MUTEX_LOCK_CHECK(rdb_collation_data_mutex);
|
||||
|
||||
codec = rdb_collation_data[cs->number];
|
||||
if (codec == nullptr) {
|
||||
Rdb_collation_codec *cur = nullptr;
|
||||
@@ -2235,7 +2278,8 @@ rdb_init_collation_mapping(const my_core::CHARSET_INFO *const cs) {
|
||||
rdb_collation_data[cs->number] = cur;
|
||||
}
|
||||
}
|
||||
mysql_mutex_unlock(&rdb_collation_data_mutex);
|
||||
|
||||
RDB_MUTEX_UNLOCK_CHECK(rdb_collation_data_mutex);
|
||||
}
|
||||
|
||||
return codec;
|
||||
@@ -2597,9 +2641,10 @@ bool Rdb_tbl_def::put_dict(Rdb_dict_manager *const dict,
|
||||
for (uint i = 0; i < m_key_count; i++) {
|
||||
const Rdb_key_def &kd = *m_key_descr_arr[i];
|
||||
|
||||
const uchar flags =
|
||||
uchar flags =
|
||||
(kd.m_is_reverse_cf ? Rdb_key_def::REVERSE_CF_FLAG : 0) |
|
||||
(kd.m_is_auto_cf ? Rdb_key_def::AUTO_CF_FLAG : 0);
|
||||
(kd.m_is_auto_cf ? Rdb_key_def::AUTO_CF_FLAG : 0) |
|
||||
(kd.m_is_per_partition_cf ? Rdb_key_def::PER_PARTITION_CF_FLAG : 0);
|
||||
|
||||
const uint cf_id = kd.get_cf()->GetID();
|
||||
/*
|
||||
@@ -2610,13 +2655,21 @@ bool Rdb_tbl_def::put_dict(Rdb_dict_manager *const dict,
|
||||
control, we can switch to use it and removing mutex.
|
||||
*/
|
||||
uint existing_cf_flags;
|
||||
const std::string cf_name = kd.get_cf()->GetName();
|
||||
|
||||
if (dict->get_cf_flags(cf_id, &existing_cf_flags)) {
|
||||
// For the purposes of comparison we'll clear the partitioning bit. The
|
||||
// intent here is to make sure that both partitioned and non-partitioned
|
||||
// tables can refer to the same CF.
|
||||
existing_cf_flags &= ~Rdb_key_def::CF_FLAGS_TO_IGNORE;
|
||||
flags &= ~Rdb_key_def::CF_FLAGS_TO_IGNORE;
|
||||
|
||||
if (existing_cf_flags != flags) {
|
||||
my_printf_error(ER_UNKNOWN_ERROR,
|
||||
"Column Family Flag is different from existing flag. "
|
||||
"Assign a new CF flag, or do not change existing "
|
||||
"CF flag.",
|
||||
MYF(0));
|
||||
"Column family ('%s') flag (%d) is different from an "
|
||||
"existing flag (%d). Assign a new CF flag, or do not "
|
||||
"change existing CF flag.", MYF(0), cf_name.c_str(),
|
||||
flags, existing_cf_flags);
|
||||
return true;
|
||||
}
|
||||
} else {
|
||||
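
Editor's note: the hunk above compares column family flags only after masking out the partitioning bit via CF_FLAGS_TO_IGNORE, so a partitioned and a non-partitioned table may refer to the same CF. A small sketch of the masked comparison, mirroring the enum added in rdb_datadic.h later in this diff (constants copied, function name illustrative):

#include <cstdint>

enum : uint32_t {
  REVERSE_CF_FLAG = 1,
  AUTO_CF_FLAG = 2,
  PER_PARTITION_CF_FLAG = 4,
  CF_FLAGS_TO_IGNORE = PER_PARTITION_CF_FLAG,
};

bool cf_flags_compatible(uint32_t existing_flags, uint32_t new_flags) {
  // Ignore the per-partition bit so otherwise-equal flag sets still match.
  return (existing_flags & ~CF_FLAGS_TO_IGNORE) ==
         (new_flags & ~CF_FLAGS_TO_IGNORE);
}
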
@@ -2690,6 +2743,24 @@ void Rdb_ddl_manager::erase_index_num(const GL_INDEX_ID &gl_index_id) {
|
||||
m_index_num_to_keydef.erase(gl_index_id);
|
||||
}
|
||||
|
||||
void Rdb_ddl_manager::add_uncommitted_keydefs(
|
||||
const std::unordered_set<std::shared_ptr<Rdb_key_def>> &indexes) {
|
||||
mysql_rwlock_wrlock(&m_rwlock);
|
||||
for (const auto &index : indexes) {
|
||||
m_index_num_to_uncommitted_keydef[index->get_gl_index_id()] = index;
|
||||
}
|
||||
mysql_rwlock_unlock(&m_rwlock);
|
||||
}
|
||||
|
||||
void Rdb_ddl_manager::remove_uncommitted_keydefs(
|
||||
const std::unordered_set<std::shared_ptr<Rdb_key_def>> &indexes) {
|
||||
mysql_rwlock_wrlock(&m_rwlock);
|
||||
for (const auto &index : indexes) {
|
||||
m_index_num_to_uncommitted_keydef.erase(index->get_gl_index_id());
|
||||
}
|
||||
mysql_rwlock_unlock(&m_rwlock);
|
||||
}
|
||||
|
||||
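
Editor's note: the two helpers above let an in-place ALTER register key definitions that are not yet committed to the data dictionary, so lookups (see the safe_find()/find() hunks below) can resolve the new indexes while the ALTER is still running. A self-contained sketch of the pattern, illustrative only; MyRocks guards the real map with mysql_rwlock_t rather than std::mutex:

#include <map>
#include <memory>
#include <mutex>
#include <unordered_set>

struct KeyDef { int index_id; };

class UncommittedKeyDefs {
 public:
  void add(const std::unordered_set<std::shared_ptr<KeyDef>> &indexes) {
    std::lock_guard<std::mutex> guard(m_lock);
    for (const auto &idx : indexes) m_map[idx->index_id] = idx;
  }
  void remove(const std::unordered_set<std::shared_ptr<KeyDef>> &indexes) {
    std::lock_guard<std::mutex> guard(m_lock);
    for (const auto &idx : indexes) m_map.erase(idx->index_id);
  }
  std::shared_ptr<KeyDef> find(int index_id) const {
    std::lock_guard<std::mutex> guard(m_lock);
    auto it = m_map.find(index_id);
    return it == m_map.end() ? nullptr : it->second;
  }
 private:
  mutable std::mutex m_lock;
  std::map<int, std::shared_ptr<KeyDef>> m_map;
};
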
namespace // anonymous namespace = not visible outside this source file
|
||||
{
|
||||
struct Rdb_validate_tbls : public Rdb_tables_scanner {
|
||||
@@ -3005,7 +3076,8 @@ bool Rdb_ddl_manager::init(Rdb_dict_manager *const dict_arg,
|
||||
tdef->m_key_descr_arr[keyno] = std::make_shared<Rdb_key_def>(
|
||||
gl_index_id.index_id, keyno, cfh, m_index_dict_version, m_index_type,
|
||||
kv_version, flags & Rdb_key_def::REVERSE_CF_FLAG,
|
||||
flags & Rdb_key_def::AUTO_CF_FLAG, "",
|
||||
flags & Rdb_key_def::AUTO_CF_FLAG,
|
||||
flags & Rdb_key_def::PER_PARTITION_CF_FLAG, "",
|
||||
m_dict->get_stats(gl_index_id));
|
||||
}
|
||||
put(tdef);
|
||||
@@ -3079,6 +3151,14 @@ Rdb_ddl_manager::safe_find(GL_INDEX_ID gl_index_id) {
|
||||
ret = kd;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
auto it = m_index_num_to_uncommitted_keydef.find(gl_index_id);
|
||||
if (it != m_index_num_to_uncommitted_keydef.end()) {
|
||||
const auto &kd = it->second;
|
||||
if (kd->max_storage_fmt_length() != 0) {
|
||||
ret = kd;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
mysql_rwlock_unlock(&m_rwlock);
|
||||
@@ -3097,6 +3177,11 @@ Rdb_ddl_manager::find(GL_INDEX_ID gl_index_id) {
|
||||
return table_def->m_key_descr_arr[it->second.second];
|
||||
}
|
||||
}
|
||||
} else {
|
||||
auto it = m_index_num_to_uncommitted_keydef.find(gl_index_id);
|
||||
if (it != m_index_num_to_uncommitted_keydef.end()) {
|
||||
return it->second;
|
||||
}
|
||||
}
|
||||
|
||||
static std::shared_ptr<Rdb_key_def> empty = nullptr;
|
||||
@@ -3126,6 +3211,8 @@ void Rdb_ddl_manager::adjust_stats(
|
||||
for (const auto &src : data) {
|
||||
const auto &keydef = find(src.m_gl_index_id);
|
||||
if (keydef) {
|
||||
keydef->m_stats.m_distinct_keys_per_prefix.resize(
|
||||
keydef->get_key_parts());
|
||||
keydef->m_stats.merge(src, i == 0, keydef->max_storage_fmt_length());
|
||||
m_stats2store[keydef->m_stats.m_gl_index_id] = keydef->m_stats;
|
||||
}
|
||||
@@ -3671,6 +3758,7 @@ void Rdb_dict_manager::add_cf_flags(rocksdb::WriteBatch *const batch,
|
||||
void Rdb_dict_manager::delete_index_info(rocksdb::WriteBatch *batch,
|
||||
const GL_INDEX_ID &gl_index_id) const {
|
||||
delete_with_prefix(batch, Rdb_key_def::INDEX_INFO, gl_index_id);
|
||||
delete_with_prefix(batch, Rdb_key_def::INDEX_STATISTICS, gl_index_id);
|
||||
}
|
||||
|
||||
bool Rdb_dict_manager::get_index_info(const GL_INDEX_ID &gl_index_id,
|
||||
@@ -4133,7 +4221,7 @@ uint Rdb_seq_generator::get_and_update_next_number(
|
||||
DBUG_ASSERT(dict != nullptr);
|
||||
|
||||
uint res;
|
||||
mysql_mutex_lock(&m_mutex);
|
||||
RDB_MUTEX_LOCK_CHECK(m_mutex);
|
||||
|
||||
res = m_next_number++;
|
||||
|
||||
@@ -4144,7 +4232,7 @@ uint Rdb_seq_generator::get_and_update_next_number(
|
||||
dict->update_max_index_id(batch, res);
|
||||
dict->commit(batch);
|
||||
|
||||
mysql_mutex_unlock(&m_mutex);
|
||||
RDB_MUTEX_UNLOCK_CHECK(m_mutex);
|
||||
|
||||
return res;
|
||||
}
|
||||
|
@@ -167,6 +167,13 @@ public:
uchar *const packed_tuple, const uchar *const key_tuple,
const key_part_map &keypart_map) const;

uchar *pack_field(Field *const field,
Rdb_field_packing *pack_info,
uchar * tuple,
uchar *const packed_tuple,
uchar *const pack_buffer,
Rdb_string_writer *const unpack_info,
uint *const n_null_fields) const;
/* Convert a key from Table->record format to mem-comparable form */
uint pack_record(const TABLE *const tbl, uchar *const pack_buffer,
const uchar *const record, uchar *const packed_tuple,
@@ -177,6 +184,11 @@ public:
/* Pack the hidden primary key into mem-comparable form. */
uint pack_hidden_pk(const longlong &hidden_pk_id,
uchar *const packed_tuple) const;
int unpack_field(Rdb_field_packing *const fpi,
Field *const field,
Rdb_string_reader* reader,
const uchar *const default_value,
Rdb_string_reader* unp_reader) const;
int unpack_record(TABLE *const table, uchar *const buf,
const rocksdb::Slice *const packed_key,
const rocksdb::Slice *const unpack_info,
@@ -287,7 +299,7 @@ public:
rocksdb::ColumnFamilyHandle *cf_handle_arg,
uint16_t index_dict_version_arg, uchar index_type_arg,
uint16_t kv_format_version_arg, bool is_reverse_cf_arg,
bool is_auto_cf_arg, const char *name,
bool is_auto_cf_arg, bool is_per_partition_cf, const char *name,
Rdb_index_stats stats = Rdb_index_stats());
~Rdb_key_def();

@@ -303,8 +315,13 @@ public:
enum {
REVERSE_CF_FLAG = 1,
AUTO_CF_FLAG = 2,
PER_PARTITION_CF_FLAG = 4,
};

// Set of flags to ignore when comparing two CF-s and determining if
// they're same.
static const uint CF_FLAGS_TO_IGNORE = PER_PARTITION_CF_FLAG;

// Data dictionary types
enum DATA_DICT_TYPE {
DDL_ENTRY_INDEX_START_NUMBER = 1,
@@ -414,6 +431,10 @@ public:
bool m_is_reverse_cf;

bool m_is_auto_cf;

/* If true, then column family is created per partition. */
bool m_is_per_partition_cf;

std::string m_name;
mutable Rdb_index_stats m_stats;

@@ -740,8 +761,13 @@ interface Rdb_tables_scanner {
class Rdb_ddl_manager {
Rdb_dict_manager *m_dict = nullptr;
my_core::HASH m_ddl_hash; // Contains Rdb_tbl_def elements
// maps index id to <table_name, index number>
// Maps index id to <table_name, index number>
std::map<GL_INDEX_ID, std::pair<std::string, uint>> m_index_num_to_keydef;

// Maps index id to key definitons not yet committed to data dictionary.
// This is mainly used to store key definitions during ALTER TABLE.
std::map<GL_INDEX_ID, std::shared_ptr<Rdb_key_def>>
m_index_num_to_uncommitted_keydef;
mysql_rwlock_t m_rwlock;

Rdb_seq_generator m_sequence;
@@ -787,6 +813,10 @@ public:
int scan_for_tables(Rdb_tables_scanner *tables_scanner);

void erase_index_num(const GL_INDEX_ID &gl_index_id);
void add_uncommitted_keydefs(
const std::unordered_set<std::shared_ptr<Rdb_key_def>> &indexes);
void remove_uncommitted_keydefs(
const std::unordered_set<std::shared_ptr<Rdb_key_def>> &indexes);

private:
/* Put the data into in-memory table (only) */
@@ -867,7 +897,7 @@ private:

3. CF id => CF flags
key: Rdb_key_def::CF_DEFINITION(0x3) + cf_id
value: version, {is_reverse_cf, is_auto_cf}
value: version, {is_reverse_cf, is_auto_cf, is_per_partition_cf}
cf_flags is 4 bytes in total.

4. Binlog entry (updated at commit)
@@ -930,9 +960,9 @@ public:

inline void cleanup() { mysql_mutex_destroy(&m_mutex); }

inline void lock() { mysql_mutex_lock(&m_mutex); }
inline void lock() { RDB_MUTEX_LOCK_CHECK(m_mutex); }

inline void unlock() { mysql_mutex_unlock(&m_mutex); }
inline void unlock() { RDB_MUTEX_UNLOCK_CHECK(m_mutex); }

/* Raw RocksDB operations */
std::unique_ptr<rocksdb::WriteBatch> begin() const;
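The new PER_PARTITION_CF_FLAG is deliberately excluded when two column-family flag values are compared for compatibility, which is what CF_FLAGS_TO_IGNORE encodes. A minimal sketch of the comparison this enables, assuming rdb_datadic.h is included; the helper function name is an illustration, not code from this commit:

  // Compare two cf_flags values while masking out flags that do not affect
  // compatibility (currently only PER_PARTITION_CF_FLAG).
  static bool rdb_cf_flags_are_compatible(uint flags_a, uint flags_b) {
    const uint mask = ~myrocks::Rdb_key_def::CF_FLAGS_TO_IGNORE;
    return (flags_a & mask) == (flags_b & mask);
  }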
@@ -256,7 +256,7 @@ static int rdb_i_s_perf_context_fill_table(
Rdb_perf_counters counters;

if (rdb_normalize_tablename(it, &str)) {
return HA_ERR_INTERNAL_ERROR;
DBUG_RETURN(HA_ERR_INTERNAL_ERROR);
}

if (rdb_split_normalized_tablename(str, &dbname, &tablename, &partname)) {
@@ -447,8 +447,6 @@ static int rdb_i_s_cfoptions_fill_table(
opts.disable_auto_compactions ? "ON" : "OFF"},
{"PURGE_REDUNDANT_KVS_WHILE_FLUSH",
opts.purge_redundant_kvs_while_flush ? "ON" : "OFF"},
{"VERIFY_CHECKSUM_IN_COMPACTION",
opts.verify_checksums_in_compaction ? "ON" : "OFF"},
{"MAX_SEQUENTIAL_SKIP_IN_ITERATIONS",
std::to_string(opts.max_sequential_skip_in_iterations)},
{"MEMTABLE_FACTORY", opts.memtable_factory == nullptr
@@ -463,8 +461,6 @@ static int rdb_i_s_cfoptions_fill_table(
std::to_string(opts.memtable_huge_page_size)},
{"BLOOM_LOCALITY", std::to_string(opts.bloom_locality)},
{"MAX_SUCCESSIVE_MERGES", std::to_string(opts.max_successive_merges)},
{"MIN_PARTIAL_MERGE_OPERANDS",
std::to_string(opts.min_partial_merge_operands)},
{"OPTIMIZE_FILTERS_FOR_HITS",
(opts.optimize_filters_for_hits ? "ON" : "OFF")},
};
@@ -811,7 +807,7 @@ static int rdb_i_s_compact_stats_fill_table(
DBUG_ASSERT(thd != nullptr);
DBUG_ASSERT(tables != nullptr);

DBUG_ENTER("rdb_i_s_global_compact_stats_table");
DBUG_ENTER_FUNC();

int ret = 0;

@@ -1025,7 +1021,7 @@ static int rdb_i_s_global_info_init(void *const p) {
static int rdb_i_s_compact_stats_init(void *p) {
my_core::ST_SCHEMA_TABLE *schema;

DBUG_ENTER("rdb_i_s_compact_stats_init");
DBUG_ENTER_FUNC();
DBUG_ASSERT(p != nullptr);

schema = reinterpret_cast<my_core::ST_SCHEMA_TABLE *>(p);
@@ -159,7 +159,7 @@ Rdb_mutex::Rdb_mutex() {
Rdb_mutex::~Rdb_mutex() { mysql_mutex_destroy(&m_mutex); }

Status Rdb_mutex::Lock() {
mysql_mutex_lock(&m_mutex);
RDB_MUTEX_LOCK_CHECK(m_mutex);
DBUG_ASSERT(m_old_stage_info.count(current_thd) == 0);
return Status::OK();
}
@@ -174,7 +174,7 @@ Status Rdb_mutex::TryLockFor(int64_t timeout_time MY_ATTRIBUTE((__unused__))) {
Note: PThreads API has pthread_mutex_timedlock(), but mysql's
mysql_mutex_* wrappers do not wrap that function.
*/
mysql_mutex_lock(&m_mutex);
RDB_MUTEX_LOCK_CHECK(m_mutex);
return Status::OK();
}

@@ -202,7 +202,7 @@ void Rdb_mutex::UnLock() {
return;
}
#endif
mysql_mutex_unlock(&m_mutex);
RDB_MUTEX_UNLOCK_CHECK(m_mutex);
}

} // namespace myrocks
113
storage/rocksdb/rdb_psi.cc
Normal file
@@ -0,0 +1,113 @@
/* Copyright (c) 2017, Percona and/or its affiliates. All rights reserved.

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */

#ifdef USE_PRAGMA_IMPLEMENTATION
#pragma implementation // gcc: Class implementation
#endif

#define MYSQL_SERVER 1

/* The C++ file's header */
#include "./rdb_psi.h"

/* MySQL header files */
#include <mysql/psi/mysql_stage.h>

namespace myrocks {

/*
The following is needed as an argument for mysql_stage_register,
irrespectively of whether we're compiling with P_S or not.
*/
my_core::PSI_stage_info stage_waiting_on_row_lock = {0, "Waiting for row lock",
0};

#ifdef HAVE_PSI_INTERFACE
my_core::PSI_stage_info *all_rocksdb_stages[] = {&stage_waiting_on_row_lock};

my_core::PSI_thread_key rdb_background_psi_thread_key,
rdb_drop_idx_psi_thread_key;

my_core::PSI_thread_info all_rocksdb_threads[] = {
{&rdb_background_psi_thread_key, "background", PSI_FLAG_GLOBAL},
{&rdb_drop_idx_psi_thread_key, "drop index", PSI_FLAG_GLOBAL},
};

my_core::PSI_mutex_key rdb_psi_open_tbls_mutex_key, rdb_signal_bg_psi_mutex_key,
rdb_signal_drop_idx_psi_mutex_key, rdb_collation_data_mutex_key,
rdb_mem_cmp_space_mutex_key, key_mutex_tx_list, rdb_sysvars_psi_mutex_key,
rdb_cfm_mutex_key;

my_core::PSI_mutex_info all_rocksdb_mutexes[] = {
{&rdb_psi_open_tbls_mutex_key, "open tables", PSI_FLAG_GLOBAL},
{&rdb_signal_bg_psi_mutex_key, "stop background", PSI_FLAG_GLOBAL},
{&rdb_signal_drop_idx_psi_mutex_key, "signal drop index", PSI_FLAG_GLOBAL},
{&rdb_collation_data_mutex_key, "collation data init", PSI_FLAG_GLOBAL},
{&rdb_mem_cmp_space_mutex_key, "collation space char data init",
PSI_FLAG_GLOBAL},
{&key_mutex_tx_list, "tx_list", PSI_FLAG_GLOBAL},
{&rdb_sysvars_psi_mutex_key, "setting sysvar", PSI_FLAG_GLOBAL},
{&rdb_cfm_mutex_key, "column family manager", PSI_FLAG_GLOBAL},
};

my_core::PSI_rwlock_key key_rwlock_collation_exception_list,
key_rwlock_read_free_rpl_tables, key_rwlock_skip_unique_check_tables;

my_core::PSI_rwlock_info all_rocksdb_rwlocks[] = {
{&key_rwlock_collation_exception_list, "collation_exception_list",
PSI_FLAG_GLOBAL},
{&key_rwlock_read_free_rpl_tables, "read_free_rpl_tables", PSI_FLAG_GLOBAL},
{&key_rwlock_skip_unique_check_tables, "skip_unique_check_tables",
PSI_FLAG_GLOBAL},
};

my_core::PSI_cond_key rdb_signal_bg_psi_cond_key,
rdb_signal_drop_idx_psi_cond_key;

my_core::PSI_cond_info all_rocksdb_conds[] = {
{&rdb_signal_bg_psi_cond_key, "cond signal background", PSI_FLAG_GLOBAL},
{&rdb_signal_drop_idx_psi_cond_key, "cond signal drop index",
PSI_FLAG_GLOBAL},
};

void init_rocksdb_psi_keys() {
const char *const category = "rocksdb";
int count;

if (PSI_server == nullptr)
return;

count = array_elements(all_rocksdb_mutexes);
PSI_server->register_mutex(category, all_rocksdb_mutexes, count);

count = array_elements(all_rocksdb_rwlocks);
PSI_server->register_rwlock(category, all_rocksdb_rwlocks, count);

count = array_elements(all_rocksdb_conds);
//TODO Disabling PFS for conditions due to the bug
// https://github.com/MySQLOnRocksDB/mysql-5.6/issues/92
// PSI_server->register_cond(category, all_rocksdb_conds, count);

count = array_elements(all_rocksdb_stages);
mysql_stage_register(category, all_rocksdb_stages, count);

count = array_elements(all_rocksdb_threads);
mysql_thread_register(category, all_rocksdb_threads, count);
}
#else // HAVE_PSI_INTERFACE
void init_rocksdb_psi_keys() {}
#endif // HAVE_PSI_INTERFACE

} // namespace myrocks
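The keys registered above only take effect if init_rocksdb_psi_keys() runs during plugin startup and the same keys are then passed to the corresponding mysql_*_init calls. The actual call sites live in ha_rocksdb.cc and are not part of this diff, so the following is only a hedged sketch of the usual pattern; the function name and mutex variable are assumptions for illustration:

  static mysql_mutex_t rdb_open_tables_mutex;

  static int rocksdb_init_func_sketch() {
    // Register all MyRocks PSI keys with the Performance Schema first.
    myrocks::init_rocksdb_psi_keys();

    // Then create instrumented objects using the registered keys, so P_S
    // can attribute waits to "open tables" and friends.
    mysql_mutex_init(myrocks::rdb_psi_open_tbls_mutex_key,
                     &rdb_open_tables_mutex, MY_MUTEX_INIT_FAST);
    return 0;
  }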
55
storage/rocksdb/rdb_psi.h
Normal file
@@ -0,0 +1,55 @@
/* Copyright (c) 2017, Percona and/or its affiliates. All rights reserved.

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
#pragma once

#ifndef _rdb_psi_h_
#define _rdb_psi_h_

/* MySQL header files */
#include <my_global.h>
#include <mysql/psi/psi.h>

/* MyRocks header files */
#include "./rdb_utils.h"

namespace myrocks {

/*
The following is needed as an argument for mysql_stage_register,
irrespectively of whether we're compiling with P_S or not.
*/
extern my_core::PSI_stage_info stage_waiting_on_row_lock;

#ifdef HAVE_PSI_INTERFACE
extern my_core::PSI_thread_key rdb_background_psi_thread_key,
rdb_drop_idx_psi_thread_key;

extern my_core::PSI_mutex_key rdb_psi_open_tbls_mutex_key,
rdb_signal_bg_psi_mutex_key, rdb_signal_drop_idx_psi_mutex_key,
rdb_collation_data_mutex_key, rdb_mem_cmp_space_mutex_key,
key_mutex_tx_list, rdb_sysvars_psi_mutex_key, rdb_cfm_mutex_key;

extern my_core::PSI_rwlock_key key_rwlock_collation_exception_list,
key_rwlock_read_free_rpl_tables, key_rwlock_skip_unique_check_tables;

extern my_core::PSI_cond_key rdb_signal_bg_psi_cond_key,
rdb_signal_drop_idx_psi_cond_key;
#endif // HAVE_PSI_INTERFACE

void init_rocksdb_psi_keys();

} // namespace myrocks

#endif // _rdb_psi_h_
@@ -191,6 +191,10 @@ Rdb_sst_info::Rdb_sst_info(rocksdb::DB *const db, const std::string &tablename,
m_prefix += normalized_table + "_" + indexname + "_";
}

// Unique filename generated to prevent collisions when the same table
// is loaded in parallel
m_prefix += std::to_string(m_prefix_counter.fetch_add(1)) + "_";

rocksdb::ColumnFamilyDescriptor cf_descr;
const rocksdb::Status s = m_cf->GetDescriptor(&cf_descr);
if (!s.ok()) {
@@ -221,7 +225,7 @@ int Rdb_sst_info::open_new_sst_file() {
// Open the sst file
const rocksdb::Status s = m_sst_file->open();
if (!s.ok()) {
set_error_msg(s.ToString());
set_error_msg(m_sst_file->get_name(), s.ToString());
delete m_sst_file;
m_sst_file = nullptr;
return HA_EXIT_FAILURE;
@@ -255,7 +259,7 @@ void Rdb_sst_info::close_curr_sst_file() {
#else
const rocksdb::Status s = m_sst_file->commit();
if (!s.ok()) {
set_error_msg(s.ToString());
set_error_msg(m_sst_file->get_name(), s.ToString());
}

delete m_sst_file;
@@ -293,7 +297,7 @@ int Rdb_sst_info::put(const rocksdb::Slice &key, const rocksdb::Slice &value) {
// Add the key/value to the current sst file
const rocksdb::Status s = m_sst_file->put(key, value);
if (!s.ok()) {
set_error_msg(s.ToString());
set_error_msg(m_sst_file->get_name(), s.ToString());
return HA_EXIT_FAILURE;
}

@@ -329,16 +333,18 @@ int Rdb_sst_info::commit() {
return HA_EXIT_SUCCESS;
}

void Rdb_sst_info::set_error_msg(const std::string &msg) {
void Rdb_sst_info::set_error_msg(const std::string &sst_file_name,
const std::string &msg) {
#if defined(RDB_SST_INFO_USE_THREAD)
// Both the foreground and background threads can set the error message
// so lock the mutex to protect it. We only want the first error that
// we encounter.
const std::lock_guard<std::mutex> guard(m_mutex);
#endif
my_printf_error(ER_UNKNOWN_ERROR, "bulk load error: %s", MYF(0), msg.c_str());
my_printf_error(ER_UNKNOWN_ERROR, "[%s] bulk load error: %s", MYF(0),
sst_file_name.c_str(), msg.c_str());
if (m_error_msg.empty()) {
m_error_msg = msg;
m_error_msg = "[" + sst_file_name + "] " + msg;
}
}

@@ -366,7 +372,7 @@ void Rdb_sst_info::run_thread() {
// Close out the sst file and add it to the database
const rocksdb::Status s = sst_file->commit();
if (!s.ok()) {
set_error_msg(s.ToString());
set_error_msg(sst_file->get_name(), s.ToString());
}

delete sst_file;
@@ -412,5 +418,6 @@ void Rdb_sst_info::init(const rocksdb::DB *const db) {
my_dirend(dir_info);
}

std::atomic<uint64_t> Rdb_sst_info::m_prefix_counter(0);
std::string Rdb_sst_info::m_suffix = ".bulk_load.tmp";
} // namespace myrocks
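The combination of a per-table prefix, the process-wide atomic m_prefix_counter, and the static ".bulk_load.tmp" suffix is what keeps temporary SST file names unique when the same table is bulk-loaded in parallel. A standalone sketch of that naming scheme follows; the prefix construction and suffix mirror the diff, while the final file-name composition with a per-loader count is an assumption for illustration:

  #include <atomic>
  #include <cstdint>
  #include <string>

  // One process-wide counter and one suffix, as in Rdb_sst_info.
  static std::atomic<uint64_t> prefix_counter(0);
  static const std::string suffix = ".bulk_load.tmp";

  // Each loader computes its prefix once; fetch_add hands every concurrent
  // loader of the same table a distinct sequence number.
  static std::string make_prefix(const std::string &table,
                                 const std::string &index) {
    return table + "_" + index + "_" +
           std::to_string(prefix_counter.fetch_add(1)) + "_";
  }

  // The n-th SST file of a loader would then be named from its prefix plus a
  // per-loader file count (assumed here) and the shared suffix.
  static std::string make_file_name(const std::string &prefix,
                                    uint64_t sst_count) {
    return prefix + std::to_string(sst_count) + suffix;
  }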
@@ -17,6 +17,7 @@
#pragma once

/* C++ standard header files */
#include <atomic>
#include <condition_variable>
#include <mutex>
#include <queue>
@@ -55,6 +56,7 @@ public:
rocksdb::Status open();
rocksdb::Status put(const rocksdb::Slice &key, const rocksdb::Slice &value);
rocksdb::Status commit();
const std::string get_name() const { return m_name; }
};

class Rdb_sst_info {
@@ -70,6 +72,7 @@ private:
uint m_sst_count;
std::string m_error_msg;
std::string m_prefix;
static std::atomic<uint64_t> m_prefix_counter;
static std::string m_suffix;
#if defined(RDB_SST_INFO_USE_THREAD)
std::queue<Rdb_sst_file *> m_queue;
@@ -83,7 +86,7 @@ private:

int open_new_sst_file();
void close_curr_sst_file();
void set_error_msg(const std::string &msg);
void set_error_msg(const std::string &sst_file_name, const std::string &msg);

#if defined(RDB_SST_INFO_USE_THREAD)
void run_thread();
@@ -28,6 +28,7 @@ void *Rdb_thread::thread_func(void *const thread_ptr) {
DBUG_ASSERT(thread_ptr != nullptr);
Rdb_thread *const thread = static_cast<Rdb_thread *const>(thread_ptr);
if (!thread->m_run_once.exchange(true)) {
thread->setname();
thread->run();
thread->uninit();
}
@@ -56,32 +57,24 @@ int Rdb_thread::create_thread(const std::string &thread_name
PSI_thread_key background_psi_thread_key
#endif
) {
DBUG_ASSERT(!thread_name.empty());
// Make a copy of the name so we can return without worrying that the
// caller will free the memory
m_name = thread_name;

int err = mysql_thread_create(background_psi_thread_key, &m_handle, nullptr,
return mysql_thread_create(background_psi_thread_key, &m_handle, nullptr,
thread_func, this);

if (!err) {
/*
mysql_thread_create() ends up doing some work underneath and setting the
thread name as "my-func". This isn't what we want. Our intent is to name
the threads according to their purpose so that when displayed under the
debugger then they'll be more easily identifiable. Therefore we'll reset
the name if thread was successfully created.
*/
err = pthread_setname_np(m_handle, thread_name.c_str());
}

return err;
}

void Rdb_thread::signal(const bool &stop_thread) {
mysql_mutex_lock(&m_signal_mutex);
RDB_MUTEX_LOCK_CHECK(m_signal_mutex);

if (stop_thread) {
m_stop = true;
}

mysql_cond_signal(&m_signal_cond);
mysql_mutex_unlock(&m_signal_mutex);

RDB_MUTEX_UNLOCK_CHECK(m_signal_mutex);
}

} // namespace myrocks
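The hunk above moves the pthread_setname_np() call out of the creating thread and into the created thread itself (setname() is now invoked from thread_func()), because naming from the creator was found unreliable in some situations. A minimal standalone sketch of the same pattern with plain pthreads, assuming GNU/Linux where pthread_setname_np exists and names are limited to 15 characters plus the terminator; this is not MyRocks code:

  #include <pthread.h>
  #include <cstdio>

  static const char *const kThreadName = "myrocks-bg";

  static void *worker(void *) {
    // The thread names itself right after it starts running, mirroring
    // Rdb_thread::setname() being called from thread_func().
    int err = pthread_setname_np(pthread_self(), kThreadName);
    if (err != 0)
      std::fprintf(stderr, "failed to set thread name '%s', err=%d\n",
                   kThreadName, err);
    return nullptr;
  }

  int main() {
    pthread_t handle;
    pthread_create(&handle, nullptr, worker, nullptr);
    pthread_join(handle, nullptr);
    return 0;
  }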
@@ -40,6 +40,8 @@ private:

pthread_t m_handle;

std::string m_name;

protected:
mysql_mutex_t m_signal_mutex;
mysql_cond_t m_signal_cond;
@@ -64,6 +66,31 @@ public:

int join() { return pthread_join(m_handle, nullptr); }

void setname() {
/*
mysql_thread_create() ends up doing some work underneath and setting the
thread name as "my-func". This isn't what we want. Our intent is to name
the threads according to their purpose so that when displayed under the
debugger then they'll be more easily identifiable. Therefore we'll reset
the name if thread was successfully created.
*/

/*
We originally had the creator also set the thread name, but that seems to
not work correctly in all situations. Having the created thread do the
pthread_setname_np resolves the issue.
*/
DBUG_ASSERT(!m_name.empty());
int err = pthread_setname_np(m_handle, m_name.c_str());
if (err)
{
// NO_LINT_DEBUG
sql_print_warning(
"MyRocks: Failed to set name (%s) for current thread, errno=%d",
m_name.c_str(), errno);
}
}

void uninit();

virtual ~Rdb_thread() {}
@@ -92,9 +119,11 @@ public:
virtual void run() override;

void request_save_stats() {
mysql_mutex_lock(&m_signal_mutex);
RDB_MUTEX_LOCK_CHECK(m_signal_mutex);

m_save_stats = true;
mysql_mutex_unlock(&m_signal_mutex);

RDB_MUTEX_UNLOCK_CHECK(m_signal_mutex);
}
};
@@ -20,6 +20,7 @@
/* C++ standard header files */
#include <array>
#include <string>
#include <vector>

/* C standard header files */
#include <ctype.h>
@@ -212,6 +213,22 @@ const char *rdb_skip_id(const struct charset_info_st *const cs,
return rdb_parse_id(cs, str, nullptr);
}

/*
Parses a given string into tokens (if any) separated by a specific delimiter.
*/
const std::vector<std::string> parse_into_tokens(
const std::string& s, const char delim) {
std::vector<std::string> tokens;
std::string t;
std::stringstream ss(s);

while (getline(ss, t, delim)) {
tokens.push_back(t);
}

return tokens;
}

static const std::size_t rdb_hex_bytes_per_char = 2;
static const std::array<char, 16> rdb_hexdigit = {{'0', '1', '2', '3', '4', '5',
'6', '7', '8', '9', 'a', 'b',
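parse_into_tokens() is a straightforward delimiter split built on std::getline; a standalone build additionally needs <sstream>, which the actual file picks up through other headers. A usage sketch with the same logic reproduced so it is self-contained; the input string is made up for illustration:

  #include <iostream>
  #include <sstream>
  #include <string>
  #include <vector>

  // Same splitting logic as myrocks::parse_into_tokens().
  static std::vector<std::string> split(const std::string &s, const char delim) {
    std::vector<std::string> tokens;
    std::string t;
    std::stringstream ss(s);
    while (std::getline(ss, t, delim)) {
      tokens.push_back(t);
    }
    return tokens;
  }

  int main() {
    // e.g. splitting a comma-separated table list (example value only).
    for (const auto &tok : split("db1.t1,db2.t2,db3.t3", ','))
      std::cout << tok << '\n';  // prints db1.t1, db2.t2, db3.t3 on separate lines
    return 0;
  }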
@@ -18,8 +18,11 @@
/* C++ standard header files */
#include <chrono>
#include <string>
#include <vector>

/* MySQL header files */
#include "../sql/log.h"
#include "./my_stacktrace.h"
#include "./sql_string.h"

/* RocksDB header files */
@@ -129,6 +132,16 @@ namespace myrocks {
#define HA_EXIT_SUCCESS FALSE
#define HA_EXIT_FAILURE TRUE

/*
Macros to better convey the intent behind checking the results from locking
and unlocking mutexes.
*/
#define RDB_MUTEX_LOCK_CHECK(m) \
rdb_check_mutex_call_result(__PRETTY_FUNCTION__, true, mysql_mutex_lock(&m))
#define RDB_MUTEX_UNLOCK_CHECK(m) \
rdb_check_mutex_call_result(__PRETTY_FUNCTION__, false, \
mysql_mutex_unlock(&m))

/*
Generic constant.
*/
@@ -203,6 +216,28 @@ inline int purge_all_jemalloc_arenas() {
#endif
}

/*
Helper function to check the result of locking or unlocking a mutex. We'll
intentionally abort in case of a failure because it's better to terminate
the process instead of continuing in an undefined state and corrupting data
as a result.
*/
inline void rdb_check_mutex_call_result(const char *function_name,
const bool attempt_lock,
const int result) {
if (unlikely(result)) {
/* NO_LINT_DEBUG */
sql_print_error("%s a mutex inside %s failed with an "
"error code %d.",
attempt_lock ? "Locking" : "Unlocking", function_name,
result);

// This will hopefully result in a meaningful stack trace which we can use
// to efficiently debug the root cause.
abort_with_stack_traces();
}
}

/*
Helper functions to parse strings.
*/
@@ -230,6 +265,9 @@ const char *rdb_parse_id(const struct charset_info_st *const cs,
const char *rdb_skip_id(const struct charset_info_st *const cs, const char *str)
MY_ATTRIBUTE((__nonnull__, __warn_unused_result__));

const std::vector<std::string> parse_into_tokens(const std::string& s,
const char delim);

/*
Helper functions to populate strings.
*/
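These macros are the mechanism behind all the mysql_mutex_lock/unlock replacements in this commit: the return value of the underlying call is checked, and a non-zero result aborts with a stack trace instead of silently continuing. A hedged sketch of the before/after pattern at a typical call site, assuming the MySQL and MyRocks headers above are available; the function names are illustrative, not from the tree:

  // Before: the return value of the lock call was silently discarded.
  void some_rdb_function_old(mysql_mutex_t &mutex) {
    mysql_mutex_lock(&mutex);
    // ... critical section ...
    mysql_mutex_unlock(&mutex);
  }

  // After: any failure is reported with the enclosing function name and the
  // process is aborted via rdb_check_mutex_call_result().
  void some_rdb_function_new(mysql_mutex_t &mutex) {
    RDB_MUTEX_LOCK_CHECK(mutex);
    // ... critical section ...
    RDB_MUTEX_UNLOCK_CHECK(mutex);
  }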
@@ -3,13 +3,13 @@
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
//
#include "rocksdb/ldb_tool.h"
#include "../rdb_comparator.h"
#include "rocksdb/ldb_tool.h"

int main(int argc, char** argv) {
int main(int argc, char **argv) {
rocksdb::Options db_options;
const myrocks::Rdb_pk_comparator pk_comparator;
db_options.comparator= &pk_comparator;
db_options.comparator = &pk_comparator;

rocksdb::LDBTool tool;
tool.Run(argc, argv, db_options);
@@ -18,30 +18,24 @@
#include "../ha_rocksdb.h"
#include "../rdb_datadic.h"

void putKeys(myrocks::Rdb_tbl_prop_coll* coll,
int num,
bool is_delete,
uint64_t expected_deleted)
{
void putKeys(myrocks::Rdb_tbl_prop_coll *coll, int num, bool is_delete,
uint64_t expected_deleted) {
std::string str("aaaaaaaaaaaaaa");
rocksdb::Slice sl(str.data(), str.size());

for (int i=0; i < num; i++) {
for (int i = 0; i < num; i++) {
coll->AddUserKey(
sl, sl,
is_delete ? rocksdb::kEntryDelete : rocksdb::kEntryPut,
0, 100);
sl, sl, is_delete ? rocksdb::kEntryDelete : rocksdb::kEntryPut, 0, 100);
}
DBUG_ASSERT(coll->GetMaxDeletedRows() == expected_deleted);
}

int main(int argc, char** argv)
{
int main(int argc, char **argv) {
// test the circular buffer for delete flags
myrocks::Rdb_compact_params params;
params.m_file_size= 333;
params.m_deletes= 333; // irrelevant
params.m_window= 10;
params.m_file_size = 333;
params.m_deletes = 333; // irrelevant
params.m_window = 10;

myrocks::Rdb_tbl_prop_coll coll(nullptr, params, 0,
RDB_DEFAULT_TBL_STATS_SAMPLE_PCT);